本文整理汇总了Golang中github.com/Shopify/sarama.StringEncoder函数的典型用法代码示例。如果您正苦于以下问题:Golang StringEncoder函数的具体用法?Golang StringEncoder怎么用?Golang StringEncoder使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了StringEncoder函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: Write
// Write serializes each point to its line-protocol string and publishes it to
// the configured Kafka topic. When the point carries the configured
// RoutingTag, that tag's value is used as the message key so points sharing
// the value land on the same partition. Returns on the first send failure.
func (k *Kafka) Write(points []*client.Point) error {
	if len(points) == 0 {
		return nil
	}

	for _, p := range points {
		// Combine tags from Point and BatchPoints and grab the resulting
		// line-protocol output string to write to Kafka
		value := p.String()

		m := &sarama.ProducerMessage{
			Topic: k.Topic,
			Value: sarama.StringEncoder(value),
		}
		if h, ok := p.Tags()[k.RoutingTag]; ok {
			m.Key = sarama.StringEncoder(h)
		}

		// fmt.Errorf replaces the original errors.New(fmt.Sprintf(...)),
		// which `go vet` flags (S1028): it allocates twice and hides the
		// format string from static analysis.
		if _, _, err := k.producer.SendMessage(m); err != nil {
			return fmt.Errorf("FAILED to send kafka message: %s\n", err)
		}
	}
	return nil
}
示例2: forwarding
// forwarding wraps the given processors into a single ProcessLogic that runs
// each of them on an incoming notice and then forwards the notice to Kafka,
// keyed by the notice type.
func forwarding(pl ...ProcessLogic) ProcessLogic {
	return eventProcessor(func(n fsmonitor.Notice) {
		for _, proc := range pl {
			proc.Process(n)
		}

		/* If no message key set, all messages will be distributed randomly
		 * over the different partitions.*/
		msg := &sarama.ProducerMessage{
			Topic: *topic,
			Key:   sarama.StringEncoder(fmt.Sprintf("%v", n.Type())),
			Value: sarama.StringEncoder(n.Name()),
		}
		if partition, offset, err := noticeSender.SendMessage(msg); err != nil {
			Logger.Printf("Failed to store your data:, %s", err)
		} else {
			// The tuple (topic, partition, offset) can be used as a unique identifier
			// for a message in a Kafka cluster.
			Logger.Printf("Your data is stored with unique identifier kafka://%s/%d/%d", *topic, partition, offset)
		}
	})
}
示例3: Enqueue
// Enqueue builds a producer message from the key/value pair and pushes it
// onto the topic's Producable channel; blocks if the channel is full.
func (q KafkaTopic) Enqueue(key, value string) {
	msg := &sarama.ProducerMessage{
		Topic: q.Topic,
		Key:   sarama.StringEncoder(key),
		Value: sarama.StringEncoder(value),
	}
	q.Producable <- msg
}
示例4: Write
// Write serializes every metric and publishes each serialized value to the
// configured Kafka topic. When a metric carries the configured RoutingTag,
// that tag's value is used as the message key so those metrics share a
// partition. Returns on the first serialization or send failure.
func (k *Kafka) Write(metrics []telegraf.Metric) error {
	if len(metrics) == 0 {
		return nil
	}

	for _, metric := range metrics {
		values, err := k.serializer.Serialize(metric)
		if err != nil {
			return err
		}

		for _, value := range values {
			m := &sarama.ProducerMessage{
				Topic: k.Topic,
				Value: sarama.StringEncoder(value),
			}
			if h, ok := metric.Tags()[k.RoutingTag]; ok {
				m.Key = sarama.StringEncoder(h)
			}

			// Check each send immediately. The original only inspected the
			// error after the inner loop finished and reassigned it on every
			// iteration, so a failed send followed by a successful one was
			// silently dropped.
			if _, _, err := k.producer.SendMessage(m); err != nil {
				return fmt.Errorf("FAILED to send kafka message: %s\n", err)
			}
		}
	}
	return nil
}
示例5: Publish
// Publish drains FileEvents from input and forwards each one to Kafka as an
// async message whose key is the event's file offset and whose value is the
// event text. source is the path of the file being tailed (its base name may
// select a per-file topic via topicmap); ctrl is handed to the Registrar,
// which consumes the producer's Errors/Successes channels. Blocks until
// input is closed, then flushes and closes the producer via the defer.
func Publish(input chan *FileEvent, source string, ctrl chan bool) {
	// Producer tuning: local-broker ack only, snappy compression, and
	// batched flushes (every 500ms / 200 msgs / 16KiB, whichever first).
	clientConfig := sarama.NewConfig()
	clientConfig.Producer.RequiredAcks = sarama.WaitForLocal
	clientConfig.Producer.Compression = sarama.CompressionSnappy
	clientConfig.Producer.Flush.Frequency = 500 * time.Millisecond
	clientConfig.Producer.Flush.Messages = 200
	clientConfig.Producer.Flush.MaxMessages = 200
	clientConfig.Producer.Flush.Bytes = 16384
	// Successes must be returned so the Registrar can checkpoint delivered
	// offsets.
	clientConfig.Producer.Return.Successes = true
	clientConfig.Producer.Partitioner = sarama.NewRoundRobinPartitioner
	clientConfig.ChannelBufferSize = kafkabuffer
	//brokerList := []string{"127.0.0.1:9092"}
	var producer sarama.AsyncProducer
	var err error
	// Retry forever until the broker is reachable; one-second backoff.
	for {
		producer, err = sarama.NewAsyncProducer(brokerList, clientConfig)
		if err != nil {
			log.Error("Publish: Failed to start Sarama producer: ", err)
			log.Info("waiting....")
			time.Sleep(1 * time.Second)
		} else {
			break
		}
	}
	defer func() {
		if err := producer.Close(); err != nil {
			log.Error("Failed to shutdown producer cleanly", err)
		}
	}()
	// The Registrar goroutine owns the Errors/Successes channels; it must be
	// started before the first message is enqueued or the producer stalls.
	registrar := &Registrar{source: source, publishCtrl: ctrl}
	go registrar.RegistrarDo(producer.Errors(), producer.Successes())
	topic := kafkaTopic
	baseName := filepath.Base(source)
	// Optional per-file topic override based on the tailed file's base name.
	if len(topicmap) > 0 {
		tmpTopic := genTopic(baseName, topicmap)
		if tmpTopic != "" {
			topic = tmpTopic
		}
	}
	key := hashKey
	for event := range input {
		log.Debugf("%v, %v, %v, %v", *event.Source, *event.Text, event.Line, event.Offset)
		// Key by file offset; Metadata carries the event through to the
		// Registrar for checkpointing on success/error.
		key = strconv.FormatInt(event.Offset, 10)
		producer.Input() <- &sarama.ProducerMessage{
			Topic:    topic,
			Key:      sarama.StringEncoder(key),
			Value:    sarama.StringEncoder(*event.Text),
			Metadata: event,
		}
	}
}
示例6: main
// main enqueues a timestamp-keyed message to the "important" topic every
// 500ms until interrupted (SIGINT), then reports how many messages were
// enqueued and how many failed.
func main() {
	// Producer settings: retry each message up to 5 times and require the
	// full in-sync replica set to acknowledge writes.
	config := sarama.NewConfig()
	config.Producer.Retry.Max = 5
	config.Producer.RequiredAcks = sarama.WaitForAll

	brokers := []string{"localhost:9092"}
	producer, err := sarama.NewAsyncProducer(brokers, config)
	if err != nil {
		// Should not reach here
		panic(err)
	}
	defer func() {
		if err := producer.Close(); err != nil {
			// Should not reach here
			panic(err)
		}
	}()

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	var enqueued, errors int
	doneCh := make(chan struct{})

	go func() {
		for {
			time.Sleep(500 * time.Millisecond)

			strTime := strconv.Itoa(int(time.Now().Unix()))
			msg := &sarama.ProducerMessage{
				Topic: "important",
				Key:   sarama.StringEncoder(strTime),
				Value: sarama.StringEncoder("Something Cool"),
			}

			select {
			case producer.Input() <- msg:
				enqueued++
				fmt.Println("Produce message")
			case err := <-producer.Errors():
				errors++
				fmt.Println("Failed to produce message:", err)
			case <-signals:
				// Signal the main goroutine to exit.
				doneCh <- struct{}{}
			}
		}
	}()

	<-doneCh
	log.Printf("Enqueued: %d; errors: %d\n", enqueued, errors)
}
示例7: Test01
// Test01 produces two messages to a freshly named topic and verifies that a
// partition consumer reads them back in order.
func (suite *KafkaTester) Test01() {
	t := suite.T()
	assert := assert.New(t)

	const (
		M1 = "message one"
		M2 = "message two"
	)

	var (
		producer          sarama.AsyncProducer
		consumer          sarama.Consumer
		partitionConsumer sarama.PartitionConsumer
		err               error
	)

	topic := makeTopicName()

	{
		// Fire-and-forget producer: neither successes nor errors reported.
		config := sarama.NewConfig()
		config.Producer.Return.Successes = false
		config.Producer.Return.Errors = false

		producer, err = sarama.NewAsyncProducer([]string{suite.server}, config)
		assert.NoError(err)
		defer close(t, producer)

		producer.Input() <- &sarama.ProducerMessage{
			Topic: topic,
			Key:   nil,
			Value: sarama.StringEncoder(M1)}

		producer.Input() <- &sarama.ProducerMessage{
			Topic: topic,
			Key:   nil,
			Value: sarama.StringEncoder(M2)}
	}

	{
		// Consume partition 0 from the beginning (offset 0).
		consumer, err = sarama.NewConsumer([]string{suite.server}, nil)
		assert.NoError(err)
		defer close(t, consumer)

		partitionConsumer, err = consumer.ConsumePartition(topic, 0, 0)
		assert.NoError(err)
		defer close(t, partitionConsumer)
	}

	{
		mssg1 := <-partitionConsumer.Messages()
		//t.Logf("Consumed: offset:%d value:%v", mssg1.Offset, string(mssg1.Value))
		mssg2 := <-partitionConsumer.Messages()
		//t.Logf("Consumed: offset:%d value:%v", mssg2.Offset, string(mssg2.Value))

		assert.EqualValues(M1, string(mssg1.Value))
		assert.EqualValues(M2, string(mssg2.Value))
	}
}
示例8: main
// main starts one goroutine per temperature sensor; each goroutine polls its
// sensor every 5 seconds and publishes the reading to the "important" topic
// keyed by the device id. Blocks forever.
func main() {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll

	brokers := []string{"localhost:9092"}
	producer, err := sarama.NewAsyncProducer(brokers, config)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := producer.Close(); err != nil {
			panic(err)
		}
	}()

	deviceIds := [1]string{"28-00000626aa4d"}
	for _, deviceId := range deviceIds {
		deviceId := deviceId // per-iteration copy for the goroutine closure
		go func() {
			for {
				temperatureValue := getTemperatureValue(deviceId)
				fmt.Println(temperatureValue)

				msg := &sarama.ProducerMessage{
					Topic: "important",
					Key:   sarama.StringEncoder(deviceId),
					Value: sarama.StringEncoder(strconv.FormatFloat(temperatureValue, 'E', -1, 64)),
				}

				select {
				case producer.Input() <- msg:
					fmt.Println("Produce message")
				case err := <-producer.Errors():
					fmt.Println("Failed to produce message:", err)
				}

				time.Sleep(5 * time.Second)
			}
		}()
	}

	// Block forever; the sensor goroutines do all the work.
	select {}
}
示例9: main
// main is a small CLI that sends -a copies of message -m to the first topic
// listed in the config file -c, using a random partitioner and a key derived
// from the current time. Exits non-zero on missing flags or config errors.
func main() {
	configFile := flag.String("c", "", "Config file")
	messageValue := flag.String("m", "", "Message")
	amount := flag.Int("a", 1, "Amount of messages")
	flag.Parse()

	if *configFile == "" || *messageValue == "" {
		flag.PrintDefaults()
		os.Exit(1)
	}

	options, err := revolver.LoadOptions(*configFile)
	if err != nil {
		// log.Fatalln already calls os.Exit(1); the original's trailing
		// os.Exit(1) after it was unreachable and has been removed.
		log.Fatalln(err)
	}

	sarama.Logger = logger

	var keyEncoder, valueEncoder sarama.Encoder
	keyEncoder = sarama.StringEncoder(time.Now().String())
	// *messageValue is guaranteed non-empty by the flag check above; the
	// guard is kept to preserve the original structure.
	if *messageValue != "" {
		valueEncoder = sarama.StringEncoder(*messageValue)
	}

	config := sarama.NewConfig()
	config.Producer.Partitioner = sarama.NewRandomPartitioner

	producer, err := sarama.NewSyncProducer(options.Brokers, config)
	if err != nil {
		logger.Fatalln("FAILED to open the producer:", err)
	}
	defer producer.Close()

	topic := options.KafkaTopics[0]
	for i := 0; i < *amount; i++ {
		partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
			Topic: topic,
			Key:   keyEncoder,
			Value: valueEncoder,
		})
		if err != nil {
			logger.Println("FAILED to produce message:", err)
		} else {
			logger.Printf("msg: %d, topic=%s\tpartition=%d\toffset=%d\n", i, topic, partition, offset)
		}
	}
}
示例10: pubKafkaLoop
// pubKafkaLoop opens a synchronous producer against the local broker and
// sends `loops` messages of `sz` bytes each, recording per-message success
// or failure in the stress counters.
func pubKafkaLoop(seq int) {
	cf := sarama.NewConfig()
	cf.Producer.RequiredAcks = sarama.WaitForLocal
	cf.Producer.Partitioner = sarama.NewHashPartitioner
	cf.Producer.Timeout = time.Second
	//cf.Producer.Compression = sarama.CompressionSnappy
	cf.Producer.Retry.Max = 3

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cf)
	if err != nil {
		stress.IncCounter("fail", 1)
		log.Println(err)
		return
	}
	defer producer.Close()

	// The payload is identical for every message, so encode it once.
	payload := sarama.StringEncoder(strings.Repeat("X", sz))
	for i := 0; i < loops; i++ {
		if _, _, err := producer.SendMessage(&sarama.ProducerMessage{
			Topic: topic,
			Value: payload,
		}); err != nil {
			stress.IncCounter("fail", 1)
		} else {
			stress.IncCounter("ok", 1)
		}
	}
}
示例11: main
// main streams a file line-by-line to the Kafka topic given by -topic via an
// async producer, one message per line. Requires -brokers (comma-separated)
// and a readable -filename.
func main() {
	flag.Parse()

	if *brokers == "" {
		flag.PrintDefaults()
		os.Exit(1)
	}
	brokerList := strings.Split(*brokers, ",")

	producer := newAsyncProducer(brokerList)

	f, err := os.Open(*filename)
	if err != nil {
		log.Fatal("Could not open raw file:", err)
	}
	// Defer only after a successful Open; the original deferred before the
	// error check, which would have called Close on a nil *os.File.
	defer f.Close()

	scanner := bufio.NewScanner(f)
	scanner.Split(bufio.ScanLines)
	for scanner.Scan() {
		producer.Input() <- &sarama.ProducerMessage{
			Topic: *topic,
			Value: sarama.StringEncoder(scanner.Text()),
		}
	}
	// bufio.Scanner swallows read errors unless Err is checked; the original
	// never checked it, so a mid-file I/O error looked like a clean EOF.
	if err := scanner.Err(); err != nil {
		log.Fatal("Could not read raw file:", err)
	}
}
示例12: produce
// produce publishes cmData through the typed async producer. It fetches a
// pooled message wrapper, fills in topic/key/value, and enqueues it; while
// the input channel is blocked it drains and processes any pending produce
// errors so the producer cannot deadlock. Returns once the message is
// accepted by the producer's input channel.
func (tp *TypedProducer) produce(cmData *CmData) {
	// logger.Debug("produce requiredAcks=%d", int(tp.requiredAcks))
	// fetch and fill
	// pmpe comes from a pool; its embedded ProducerMessage is reused and
	// travels back via pmsg.Metadata so it can be returned after delivery.
	pmpe := tp.pmp.fetch()
	pmpe.privData = cmData
	pmsg := pmpe.pmsg
	pmsg.Topic = cmData.topic
	if len(cmData.key) == 0 {
		// if key is empty, using sarama.RandomPartitioner
		pmsg.Key = nil
	} else {
		pmsg.Key = sarama.StringEncoder(cmData.key)
	}
	pmsg.Value = sarama.ByteEncoder(cmData.data)
	pmsg.Metadata = pmpe
	// do produce
	// Loop until the send succeeds: if Input() would block, service the
	// Errors() channel instead, then retry the send.
	for {
		select {
		case tp.ap.Input() <-pmsg:
			return
		case perr := <-tp.ap.Errors():
			tp.processProduceErrors(perr)
		}
	}
}
示例13: Write
// Write parses an HTTP payload out of data, serializes its method, URL, body
// and headers as JSON, and enqueues the JSON document on the configured
// Kafka topic. Returns the number of JSON bytes enqueued.
func (o *KafkaOutput) Write(data []byte) (n int, err error) {
	headers := make(map[string]string)
	proto.ParseHeaders([][]byte{data}, func(header []byte, value []byte) bool {
		headers[string(header)] = string(value)
		return true
	})

	req := payloadBody(data)

	kafkaMessage := KafkaMessage{
		ReqURL:     string(proto.Path(req)),
		ReqMethod:  string(proto.Method(req)),
		ReqBody:    string(proto.Body(req)),
		ReqHeaders: headers,
	}
	// The original discarded the Marshal error with `_`, which would have
	// silently produced empty/garbage payloads if serialization ever failed.
	jsonMessage, err := json.Marshal(&kafkaMessage)
	if err != nil {
		return 0, err
	}
	message := sarama.StringEncoder(jsonMessage)

	o.producer.Input() <- &sarama.ProducerMessage{
		Topic: o.config.topic,
		Value: message,
	}

	return len(message), nil
}
示例14: broadcastKafka
// broadcastKafka publishes msg to the push topic, keyed with the broadcast
// marker so downstream consumers treat it as an all-rooms message.
func broadcastKafka(msg []byte) (err error) {
	message := &sarama.ProducerMessage{
		Topic: KafkaPushsTopic,
		Key:   sarama.StringEncoder(define.KAFKA_MESSAGE_BROADCAST),
		Value: sarama.ByteEncoder(msg),
	}
	_, _, err = producer.SendMessage(message)
	return
}
示例15: broadcastRoomKafka
// broadcastRoomKafka publishes msg to the push topic keyed by the room id,
// so all messages for one room map to the same partition.
func broadcastRoomKafka(ridStr string, msg []byte) (err error) {
	message := &sarama.ProducerMessage{
		Topic: KafkaPushsTopic,
		Key:   sarama.StringEncoder(ridStr),
		Value: sarama.ByteEncoder(msg),
	}
	_, _, err = producer.SendMessage(message)
	return
}