This page collects typical usage examples of the Golang method github.com/trivago/gollum/core.PluginConfig.GetStreamMap. If you have been wondering what PluginConfig.GetStreamMap does, how to call it, or where to find real-world uses, the curated examples below should help. You can also read up on the containing type, github.com/trivago/gollum/core.PluginConfig.
The following shows 6 code examples of the PluginConfig.GetStreamMap method, drawn from open-source projects and ordered by popularity.
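All of the examples below follow the same pattern: GetStreamMap reads a config value that maps stream names to strings and returns a map[core.MessageStreamID]string, with the second argument serving as the default. Here is a minimal sketch of how a plugin might look such a value up per message; the helper name is made up for illustration, and the wildcard fallback assumes gollum's core.WildcardStreamID constant:

package example

import "github.com/trivago/gollum/core"

// resolveStreamValue returns the setting bound to a message's stream, falling
// back to the wildcard ("*") entry that holds GetStreamMap's default value.
func resolveStreamValue(streamMap map[core.MessageStreamID]string, id core.MessageStreamID) string {
	if value, set := streamMap[id]; set {
		return value
	}
	return streamMap[core.WildcardStreamID]
}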
Example 1: Configure
// Configure initializes this producer with values from a plugin config.
func (prod *Scribe) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}

	host := conf.GetString("Address", "localhost:1463")
	bufferSizeMax := conf.GetInt("BatchSizeMaxKB", 8<<10) << 10 // 8 MB

	prod.category = make(map[core.MessageStreamID]string, 0)
	prod.batchSize = conf.GetInt("BatchSizeByte", 8192)
	prod.batchTimeout = time.Duration(conf.GetInt("BatchTimeoutSec", 5)) * time.Second
	prod.batch = createScribeMessageBatch(bufferSizeMax)
	prod.bufferSizeKB = conf.GetInt("ConnectionBufferSizeKB", 1<<10) // 1 MB
	prod.category = conf.GetStreamMap("Category", "")

	// Initialize scribe connection
	prod.socket, err = thrift.NewTSocket(host)
	if err != nil {
		Log.Error.Print("Scribe socket error:", err)
		return err
	}

	prod.transport = thrift.NewTFramedTransport(prod.socket)
	binProtocol := thrift.NewTBinaryProtocol(prod.transport, false, false)
	prod.scribe = scribe.NewScribeClientProtocol(prod.transport, binProtocol, binProtocol)

	return nil
}
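To see where the Category map pays off, here is a sketch of how the producer might translate a message's stream into a scribe category at send time. The method is illustrative only; the producer's actual send path is not shown on this page, and msg.StreamID assumes gollum's core.Message layout:

// categoryFor picks the scribe category for a message, using the wildcard
// entry from GetStreamMap("Category", "") when no explicit mapping exists.
func (prod *Scribe) categoryFor(msg core.Message) string {
	if category, set := prod.category[msg.StreamID]; set {
		return category
	}
	return prod.category[core.WildcardStreamID]
}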
Example 2: Configure
// Configure initializes this producer with values from a plugin config.
func (prod *ElasticSearch) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}
	prod.SetStopCallback(prod.close)

	defaultServer := []string{"localhost"}
	numConnections := conf.GetInt("Connections", 6)
	retrySec := conf.GetInt("RetrySec", 5)

	prod.conn = elastigo.NewConn()
	prod.conn.Hosts = conf.GetStringArray("Servers", defaultServer)
	prod.conn.Domain = conf.GetString("Domain", prod.conn.Hosts[0])
	prod.conn.ClusterDomains = prod.conn.Hosts
	prod.conn.Port = strconv.Itoa(conf.GetInt("Port", 9200))
	prod.conn.Username = conf.GetString("User", "")
	prod.conn.Password = conf.GetString("Password", "")

	prod.indexer = prod.conn.NewBulkIndexerErrors(numConnections, retrySec)
	prod.indexer.BufferDelayMax = time.Duration(conf.GetInt("BatchTimeoutSec", 5)) * time.Second
	prod.indexer.BulkMaxBuffer = conf.GetInt("BatchSizeByte", 32768)
	prod.indexer.BulkMaxDocs = conf.GetInt("BatchMaxCount", 128)

	prod.indexer.Sender = func(buf *bytes.Buffer) error {
		_, err := prod.conn.DoCommand("POST", "/_bulk", nil, buf)
		if err != nil {
			Log.Error.Print("ElasticSearch response error - ", err)
		}
		return err
	}

	prod.index = conf.GetStreamMap("Index", "")
	prod.msgType = conf.GetStreamMap("Type", "log")
	prod.msgTTL = conf.GetString("TTL", "")
	prod.dayBasedIndex = conf.GetBool("DayBasedIndex", false)

	prod.counters = make(map[string]*int64)
	prod.lastMetricUpdate = time.Now()

	for _, index := range prod.index {
		shared.Metric.New(elasticMetricMessages + index)
		shared.Metric.New(elasticMetricMessagesSec + index)
		prod.counters[index] = new(int64)
	}

	prod.SetCheckFuseCallback(prod.isClusterUp)
	return nil
}
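The loop at the end registers two metrics and one counter per configured index. Here is a sketch of how those counters might be bumped when a message is handed to the indexer; the method name is made up, and only the fields initialized in Configure above are taken from the example:

import "sync/atomic"

// countMessage increments the per-index counter filled in Configure.
func (prod *ElasticSearch) countMessage(index string) {
	if counter, set := prod.counters[index]; set {
		atomic.AddInt64(counter, 1)
	}
}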
Example 3: Configure
// Configure initializes this producer with values from a plugin config.
func (prod *Scribe) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}
	prod.SetStopCallback(prod.close)

	host := conf.GetString("Address", "localhost:1463")

	prod.batchMaxCount = conf.GetInt("BatchMaxCount", 8192)
	prod.windowSize = prod.batchMaxCount
	prod.batchFlushCount = conf.GetInt("BatchFlushCount", prod.batchMaxCount/2)
	prod.batchFlushCount = shared.MinI(prod.batchFlushCount, prod.batchMaxCount)
	prod.batchTimeout = time.Duration(conf.GetInt("BatchTimeoutSec", 5)) * time.Second
	prod.batch = core.NewMessageBatch(prod.batchMaxCount)

	prod.bufferSizeByte = conf.GetInt("ConnectionBufferSizeKB", 1<<10) << 10 // 1 MB
	prod.category = conf.GetStreamMap("Category", "")

	// Initialize scribe connection
	prod.socket, err = thrift.NewTSocket(host)
	if err != nil {
		Log.Error.Print("Scribe socket error:", err)
		return err
	}

	prod.transport = thrift.NewTFramedTransport(prod.socket)
	binProtocol := thrift.NewTBinaryProtocol(prod.transport, false, false)
	prod.scribe = scribe.NewScribeClientProtocol(prod.transport, binProtocol, binProtocol)

	prod.lastMetricUpdate = time.Now()
	prod.counters = make(map[string]*int64)

	shared.Metric.New(scribeMetricWindowSize)
	shared.Metric.SetI(scribeMetricWindowSize, prod.windowSize)

	for _, category := range prod.category {
		shared.Metric.New(scribeMetricMessages + category)
		shared.Metric.New(scribeMetricMessagesSec + category)
		prod.counters[category] = new(int64)
	}

	prod.SetCheckFuseCallback(prod.tryOpenConnection)
	return nil
}
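Compared to Example 1, this variant also seeds per-category metrics and records lastMetricUpdate. Those fields suggest a periodic update loop; a sketch of what it could look like, using only shared.Metric.SetI as seen above, where the method itself and the rate bookkeeping are assumptions:

// updateMetrics publishes an approximate per-category messages-per-second
// rate and resets the counters. Sketch only; needs "time" and "sync/atomic".
func (prod *Scribe) updateMetrics() {
	duration := time.Since(prod.lastMetricUpdate)
	prod.lastMetricUpdate = time.Now()

	for category, counter := range prod.counters {
		count := atomic.SwapInt64(counter, 0)
		shared.Metric.SetI(scribeMetricMessagesSec+category, int(float64(count)/duration.Seconds()))
	}
}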
Example 4: Configure
// Configure initializes this producer with values from a plugin config.
func (prod *ElasticSearch) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}

	defaultServer := []string{"localhost"}
	numConnections := conf.GetInt("Connections", 6)
	retrySec := conf.GetInt("RetrySec", 5)

	prod.conn = elastigo.NewConn()
	prod.conn.Hosts = conf.GetStringArray("Servers", defaultServer)
	prod.conn.Domain = conf.GetString("Domain", prod.conn.Hosts[0])
	prod.conn.ClusterDomains = prod.conn.Hosts
	prod.conn.Port = strconv.Itoa(conf.GetInt("Port", 9200))
	prod.conn.Username = conf.GetString("User", "")
	prod.conn.Password = conf.GetString("Password", "")

	prod.indexer = prod.conn.NewBulkIndexerErrors(numConnections, retrySec)
	prod.indexer.BufferDelayMax = time.Duration(conf.GetInt("BatchTimeoutSec", 5)) * time.Second
	prod.indexer.BulkMaxBuffer = conf.GetInt("BatchSizeByte", 32768)
	prod.indexer.BulkMaxDocs = conf.GetInt("BatchMaxCount", 128)

	prod.indexer.Sender = func(buf *bytes.Buffer) error {
		_, err := prod.conn.DoCommand("POST", "/_bulk", nil, buf)
		if err != nil {
			Log.Error.Print("ElasticSearch response error - ", err)
		}
		return err
	}

	prod.index = conf.GetStreamMap("Index", "")
	prod.msgType = conf.GetStreamMap("Type", "log")
	prod.msgTTL = conf.GetString("TTL", "")
	prod.dayBasedIndex = conf.GetBool("DayBasedIndex", false)

	return nil
}
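This variant keeps the DayBasedIndex flag but drops the metrics. When the flag is set, the index name from the stream map is typically suffixed with the current date; here is a sketch of that naming scheme, where both the method and the date format are assumptions rather than gollum's actual behavior:

// indexName derives the target index for a stream, optionally appending the
// current day so that a fresh index is used per date. Illustrative only.
func (prod *ElasticSearch) indexName(streamID core.MessageStreamID) string {
	index := prod.index[streamID]
	if prod.dayBasedIndex {
		index += time.Now().Format("_2006-01-02")
	}
	return index
}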
Example 5: Configure
// Configure initializes this producer with values from a plugin config.
func (prod *Kafka) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}

	if !conf.HasValue("Servers") {
		return core.NewProducerError("No servers configured for producer.Kafka")
	}

	prod.servers = conf.GetStringArray("Servers", []string{})
	prod.topic = conf.GetStreamMap("Topic", "")
	prod.clientID = conf.GetString("ClientId", "gollum")

	prod.config = kafka.NewConfig()
	prod.config.ClientID = conf.GetString("ClientId", "gollum")
	prod.config.ChannelBufferSize = conf.GetInt("MessageBufferCount", 256)

	prod.config.Net.MaxOpenRequests = conf.GetInt("MaxOpenRequests", 5)
	prod.config.Net.DialTimeout = time.Duration(conf.GetInt("ServerTimeoutSec", 30)) * time.Second
	prod.config.Net.ReadTimeout = prod.config.Net.DialTimeout
	prod.config.Net.WriteTimeout = prod.config.Net.DialTimeout

	prod.config.Metadata.Retry.Max = conf.GetInt("ElectRetries", 3)
	prod.config.Metadata.Retry.Backoff = time.Duration(conf.GetInt("ElectTimeoutMs", 250)) * time.Millisecond
	prod.config.Metadata.RefreshFrequency = time.Duration(conf.GetInt("MetadataRefreshMs", 10000)) * time.Millisecond

	prod.config.Producer.MaxMessageBytes = conf.GetInt("BatchSizeMaxKB", 1<<10) << 10
	prod.config.Producer.RequiredAcks = kafka.RequiredAcks(conf.GetInt("RequiredAcks", int(kafka.WaitForLocal)))
	prod.config.Producer.Timeout = time.Duration(conf.GetInt("TimoutMs", 1500)) * time.Millisecond
	prod.config.Producer.Return.Errors = true
	prod.config.Producer.Return.Successes = false

	switch strings.ToLower(conf.GetString("Compression", compressNone)) {
	default:
		fallthrough
	case compressNone:
		prod.config.Producer.Compression = kafka.CompressionNone
	case compressGZIP:
		prod.config.Producer.Compression = kafka.CompressionGZIP
	case compressSnappy:
		prod.config.Producer.Compression = kafka.CompressionSnappy
	}

	switch strings.ToLower(conf.GetString("Partitioner", partRandom)) {
	case partRandom:
		prod.config.Producer.Partitioner = kafka.NewRandomPartitioner
	case partRoundrobin:
		prod.config.Producer.Partitioner = kafka.NewRoundRobinPartitioner
	default:
		fallthrough
	case partHash:
		prod.config.Producer.Partitioner = kafka.NewHashPartitioner
	}

	prod.config.Producer.Flush.Bytes = conf.GetInt("BatchSizeByte", 8192)
	prod.config.Producer.Flush.Messages = conf.GetInt("BatchMinCount", 1)
	prod.config.Producer.Flush.Frequency = time.Duration(conf.GetInt("BatchTimeoutSec", 3)) * time.Second
	prod.config.Producer.Flush.MaxMessages = conf.GetInt("BatchMaxCount", 0)
	prod.config.Producer.Retry.Max = conf.GetInt("SendRetries", 3)
	prod.config.Producer.Retry.Backoff = time.Duration(conf.GetInt("SendTimeoutMs", 100)) * time.Millisecond

	return nil
}
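The two switch statements compare against constants that are defined elsewhere in this producer's source file. The following definitions are plausible given the strings.ToLower comparison, but the exact string values are assumptions:

// Assumed values for the constants referenced above; the real definitions
// live elsewhere in the gollum source and may differ.
const (
	compressNone   = "none"
	compressGZIP   = "gzip"
	compressSnappy = "snappy"
	partRandom     = "random"
	partRoundrobin = "roundrobin"
	partHash       = "hash"
)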
Example 6: Configure
// Configure initializes this producer with values from a plugin config.
func (prod *Kafka) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}
	prod.SetStopCallback(prod.close)

	prod.servers = conf.GetStringArray("Servers", []string{"localhost:9092"})
	prod.topic = conf.GetStreamMap("Topic", "")
	prod.clientID = conf.GetString("ClientId", "gollum")
	prod.lastMetricUpdate = time.Now()

	prod.config = kafka.NewConfig()
	prod.config.ClientID = conf.GetString("ClientId", "gollum")
	prod.config.ChannelBufferSize = conf.GetInt("MessageBufferCount", 256)

	prod.config.Net.MaxOpenRequests = conf.GetInt("MaxOpenRequests", 5)
	prod.config.Net.DialTimeout = time.Duration(conf.GetInt("ServerTimeoutSec", 30)) * time.Second
	prod.config.Net.ReadTimeout = prod.config.Net.DialTimeout
	prod.config.Net.WriteTimeout = prod.config.Net.DialTimeout

	prod.config.Metadata.Retry.Max = conf.GetInt("ElectRetries", 3)
	prod.config.Metadata.Retry.Backoff = time.Duration(conf.GetInt("ElectTimeoutMs", 250)) * time.Millisecond
	prod.config.Metadata.RefreshFrequency = time.Duration(conf.GetInt("MetadataRefreshMs", 10000)) * time.Millisecond

	prod.config.Producer.MaxMessageBytes = conf.GetInt("BatchSizeMaxKB", 1<<10) << 10
	prod.config.Producer.RequiredAcks = kafka.RequiredAcks(conf.GetInt("RequiredAcks", int(kafka.WaitForLocal)))
	prod.config.Producer.Timeout = time.Duration(conf.GetInt("TimoutMs", 1500)) * time.Millisecond
	prod.config.Producer.Return.Errors = true
	prod.config.Producer.Return.Successes = true

	switch strings.ToLower(conf.GetString("Compression", compressNone)) {
	default:
		fallthrough
	case compressNone:
		prod.config.Producer.Compression = kafka.CompressionNone
	case compressGZIP:
		prod.config.Producer.Compression = kafka.CompressionGZIP
	case compressSnappy:
		prod.config.Producer.Compression = kafka.CompressionSnappy
	}

	switch strings.ToLower(conf.GetString("Partitioner", partRandom)) {
	case partRandom:
		prod.config.Producer.Partitioner = kafka.NewRandomPartitioner
	case partRoundrobin:
		prod.config.Producer.Partitioner = kafka.NewRoundRobinPartitioner
	default:
		fallthrough
	case partHash:
		prod.config.Producer.Partitioner = kafka.NewHashPartitioner
	}

	prod.config.Producer.Flush.Bytes = conf.GetInt("BatchSizeByte", 8192)
	prod.config.Producer.Flush.Messages = conf.GetInt("BatchMinCount", 1)
	prod.config.Producer.Flush.Frequency = time.Duration(conf.GetInt("BatchTimeoutSec", 3)) * time.Second
	prod.config.Producer.Flush.MaxMessages = conf.GetInt("BatchMaxCount", 0)
	prod.config.Producer.Retry.Max = conf.GetInt("SendRetries", 3)
	prod.config.Producer.Retry.Backoff = time.Duration(conf.GetInt("SendTimeoutMs", 100)) * time.Millisecond

	prod.batch = core.NewMessageBatch(conf.GetInt("Channel", 8192))

	prod.counters = make(map[string]*int64)
	for _, topic := range prod.topic {
		shared.Metric.New(kafkaMetricMessages + topic)
		shared.Metric.New(kafkaMetricMessagesSec + topic)
		prod.counters[topic] = new(int64)
	}
	shared.Metric.New(kafkaMetricMissCount)

	prod.SetCheckFuseCallback(prod.tryOpenConnection)
	return nil
}
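Unlike Example 5, this variant sets Return.Successes to true, so delivery reports can feed the per-topic counters created above. Here is a sketch of draining those reports; the producer argument, the missCount field, and the poll structure are assumptions, while Successes() and Errors() are standard sarama AsyncProducer methods (sarama is aliased as kafka in these examples):

// pollResults drains pending delivery reports and updates the counters
// created in Configure. Sketch only; needs "sync/atomic".
func (prod *Kafka) pollResults(producer kafka.AsyncProducer) {
	for {
		select {
		case msg := <-producer.Successes():
			if counter, set := prod.counters[msg.Topic]; set {
				atomic.AddInt64(counter, 1)
			}
		case err := <-producer.Errors():
			Log.Error.Print("Kafka producer error: ", err)
			atomic.AddInt64(&prod.missCount, 1) // assumed field behind kafkaMetricMissCount
		default:
			return // nothing pending right now
		}
	}
}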