This article collects typical usage examples of the Golang function github.com/Shopify/sarama.NewAsyncProducer. If you are asking yourself how the NewAsyncProducer function works, how to call it, or what real code that uses it looks like, the curated examples here may help.
The following shows 15 code examples of the NewAsyncProducer function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
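Before the examples, a minimal self-contained sketch of the call may help orient readers; the broker address and topic below are placeholders, not values taken from any example that follows.

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()

	// "localhost:9092" and "example-topic" are placeholders.
	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatalln("NewAsyncProducer:", err)
	}
	defer producer.Close()

	// Failures come back on Errors(); Producer.Return.Errors defaults
	// to true, so this channel must be drained.
	go func() {
		for err := range producer.Errors() {
			log.Println("delivery failed:", err)
		}
	}()

	// Sends are asynchronous: messages are queued on Input() and
	// batched in the background.
	producer.Input() <- &sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	}
}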
Example 1: main
func main() {
	var host = flag.String("kafka", "127.0.0.1:9092", "IP address:port of kafka")
	flag.Parse()

	duration := 10 * time.Millisecond
	src := make(chan uint32)
	dst := make(chan uint32)
	notify := make(chan os.Signal, 1)
	signal.Notify(notify, os.Interrupt, os.Kill)

	config := kafka.NewConfig()
	config.Producer.Return.Successes = true
	k_producer, err := kafka.NewAsyncProducer([]string{*host}, config)
	if err != nil {
		panic(err)
	}

	fmt.Println("src_ip,dst_ip,src_coord,dst_coord,received_at")

	// dc_ips are data center IPs
	dc_ips := []uint32{1222977025, 2212761857, 2169380865}

	go producer(src, dc_ips, duration)
	go producer(dst, dc_ips, duration)
	go consumer(src, dst, k_producer)
	go func(producer kafka.AsyncProducer) {
		for {
			<-producer.Successes()
		}
	}(k_producer)

	s := <-notify
	fmt.Println("signal:", s)
	fmt.Println("done.")
}
Example 2: newAccessLogProducer
func newAccessLogProducer(brokerList []string) sarama.AsyncProducer {
	// For the access log, we are looking for AP semantics, with high throughput.
	// By creating batches of compressed messages, we reduce network I/O at a cost of more latency.
	config := sarama.NewConfig()
	tlsConfig := createTlsConfiguration()
	if tlsConfig != nil {
		config.Net.TLS.Enable = true
		config.Net.TLS.Config = tlsConfig
	}
	config.Producer.RequiredAcks = sarama.WaitForLocal       // Only wait for the leader to ack
	config.Producer.Compression = sarama.CompressionSnappy   // Compress messages
	config.Producer.Flush.Frequency = 500 * time.Millisecond // Flush batches every 500ms

	producer, err := sarama.NewAsyncProducer(brokerList, config)
	if err != nil {
		log.Fatalln("Failed to start Sarama producer:", err)
	}

	// We will just log to STDOUT if we're not able to produce messages.
	// Note: messages will only be returned here after all retry attempts are exhausted.
	go func() {
		for err := range producer.Errors() {
			log.Println("Failed to write access log entry:", err)
		}
	}()

	return producer
}
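Example 2 never shuts the producer down; a minimal shutdown sketch, assuming the producer returned above (the function name is an assumption):

func shutdownAccessLogProducer(producer sarama.AsyncProducer) {
	// AsyncClose triggers a final flush; sarama closes the Errors
	// (and Successes) channels once shutdown completes, which ends
	// the range below.
	producer.AsyncClose()
	for err := range producer.Errors() {
		log.Println("Failed to write access log entry:", err)
	}
}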
Example 3: newStorage
func newStorage(machineName string) (storage.StorageDriver, error) {
	config := kafka.NewConfig()

	tlsConfig, err := generateTLSConfig()
	if err != nil {
		return nil, err
	}
	if tlsConfig != nil {
		config.Net.TLS.Enable = true
		config.Net.TLS.Config = tlsConfig
	}

	config.Producer.RequiredAcks = kafka.WaitForAll

	brokerList := strings.Split(*brokers, ",")
	glog.V(4).Infof("Kafka brokers:%q", *brokers)

	producer, err := kafka.NewAsyncProducer(brokerList, config)
	if err != nil {
		return nil, err
	}
	ret := &kafkaStorage{
		producer:    producer,
		topic:       *topic,
		machineName: machineName,
	}
	return ret, nil
}
Example 4: pubKafkaAsyncLoop
func pubKafkaAsyncLoop(seq int) {
	cf := sarama.NewConfig()
	cf.Producer.Flush.Frequency = time.Second * 10
	cf.Producer.Flush.Messages = 1000
	cf.Producer.Flush.MaxMessages = 1000
	cf.Producer.RequiredAcks = sarama.WaitForLocal
	cf.Producer.Partitioner = sarama.NewHashPartitioner
	cf.Producer.Timeout = time.Second
	//cf.Producer.Compression = sarama.CompressionSnappy
	cf.Producer.Retry.Max = 3

	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, cf)
	if err != nil {
		stress.IncCounter("fail", 1)
		log.Println(err)
		return
	}
	defer producer.Close()

	msg := strings.Repeat("X", sz)
	for i := 0; i < loops; i++ {
		producer.Input() <- &sarama.ProducerMessage{
			Topic: topic,
			Value: sarama.StringEncoder(msg),
		}
		stress.IncCounter("ok", 1)
	}
}
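A caveat for Example 4: Producer.Return.Errors defaults to true, but nothing reads Errors(), so a failed send can eventually block the Input() channel. A minimal sketch of the usual fix (the function name is an assumption), to be called right after the producer is created:

func drainProducerErrors(producer sarama.AsyncProducer) {
	go func() {
		// Runs until the producer is closed and sarama closes the channel.
		for err := range producer.Errors() {
			log.Println("produce error:", err)
		}
	}()
}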
Example 5: NewEventPublisher
func NewEventPublisher() (*EventPublisher, error) {
	config := sarama.NewConfig()
	config.ClientID = ipresolver.GetLocalAddr()
	config.Producer.RequiredAcks = sarama.WaitForLocal
	config.Producer.Compression = sarama.CompressionNone
	config.Producer.Return.Successes = false
	config.Producer.Return.Errors = false
	config.Producer.Partitioner = sarama.NewHashPartitioner

	asyncProducer, err := sarama.NewAsyncProducer(eatonconfig.KafkaServers, config)
	if err != nil {
		return nil, err
	}
	if config.Producer.Return.Successes {
		go func() {
			for msg := range asyncProducer.Successes() {
				log.Println("Sent Message to logs: ", msg.Key)
			}
		}()
	}
	if config.Producer.Return.Errors {
		go func() {
			for err := range asyncProducer.Errors() {
				log.Println("failed to send message to logs: ", err.Error())
			}
		}()
	}
	return &EventPublisher{
		producer: asyncProducer,
	}, nil
}
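Example 5 only shows construction; the publish path is not included in this excerpt. A hypothetical Publish method consistent with the struct above (the method name and signature are assumptions):

func (p *EventPublisher) Publish(topic, key string, payload []byte) {
	// Fire-and-forget: with Return.Successes and Return.Errors both
	// disabled, nothing ever comes back on the producer's channels.
	p.producer.Input() <- &sarama.ProducerMessage{
		Topic: topic,
		Key:   sarama.StringEncoder(key),
		Value: sarama.ByteEncoder(payload),
	}
}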
Example 6: makePub
func (this *Mirror) makePub(c2 *zk.ZkCluster) (sarama.AsyncProducer, error) {
	cf := sarama.NewConfig()
	cf.Metadata.RefreshFrequency = time.Minute * 10
	cf.Metadata.Retry.Max = 3
	cf.Metadata.Retry.Backoff = time.Second * 3

	cf.ChannelBufferSize = 1000

	cf.Producer.Return.Errors = true
	cf.Producer.Flush.Messages = 2000         // 2000 messages per batch
	cf.Producer.Flush.Frequency = time.Second // flush interval
	cf.Producer.Flush.MaxMessages = 0         // unlimited
	cf.Producer.RequiredAcks = sarama.WaitForLocal
	cf.Producer.Retry.Backoff = time.Second * 4
	cf.Producer.Retry.Max = 3

	cf.Net.DialTimeout = time.Second * 30
	cf.Net.WriteTimeout = time.Second * 30
	cf.Net.ReadTimeout = time.Second * 30

	switch this.Compress {
	case "gzip":
		cf.Producer.Compression = sarama.CompressionGZIP
	case "snappy":
		cf.Producer.Compression = sarama.CompressionSnappy
	}

	return sarama.NewAsyncProducer(c2.BrokerList(), cf)
}
Example 7: Setup
// Setup prepares the Requester for benchmarking.
func (k *kafkaRequester) Setup() error {
	config := sarama.NewConfig()
	producer, err := sarama.NewAsyncProducer(k.urls, config)
	if err != nil {
		return err
	}

	consumer, err := sarama.NewConsumer(k.urls, nil)
	if err != nil {
		producer.Close()
		return err
	}
	partitionConsumer, err := consumer.ConsumePartition(k.topic, 0, sarama.OffsetNewest)
	if err != nil {
		producer.Close()
		consumer.Close()
		return err
	}

	k.producer = producer
	k.consumer = consumer
	k.partitionConsumer = partitionConsumer
	k.msg = &sarama.ProducerMessage{
		Topic: k.topic,
		Value: sarama.ByteEncoder(make([]byte, k.payloadSize)),
	}

	return nil
}
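Setup's counterpart is not shown here; a plausible Request sketch for this benchmarker (the method and its round-trip semantics are assumptions) would send the prepared message and wait for it to arrive on the partition consumer:

func (k *kafkaRequester) Request() error {
	k.producer.Input() <- k.msg
	// Block until the message makes the produce/consume round trip.
	<-k.partitionConsumer.Messages()
	return nil
}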
Example 8: initProducer
func initProducer(moduleConfig *Config) (*Producer, error) {
	fmt.Println("[INFO] initProducer called")

	brokerList := moduleConfig.Kafka.BrokerList
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll // wait for all in-sync replicas to ack
	config.Producer.Compression = sarama.CompressionSnappy
	config.Producer.Flush.Frequency = 500 * time.Millisecond

	var producer sarama.AsyncProducer
	var err error
	for currConnAttempt := 0; currConnAttempt < moduleConfig.Kafka.MaxRetry; currConnAttempt++ {
		producer, err = sarama.NewAsyncProducer(brokerList, config)
		if err == nil {
			break
		}
		fmt.Println("[INFO] Connection attempt failed (", (currConnAttempt + 1), "/", moduleConfig.Kafka.MaxRetry, ")")
		<-time.After(time.Second * 5)
	}
	if err != nil {
		fmt.Println("[ERROR] Unable to set up kafka producer", err)
		return nil, err
	}

	// You must read from the Errors() channel or the producer will deadlock.
	go func() {
		for err := range producer.Errors() {
			log.Println("[ERROR] Kafka producer error: ", err)
		}
	}()

	fmt.Println("[INFO] kafka producer initialized successfully")
	return &Producer{producer: producer, id: CreatedProducersLength()}, nil
}
Example 9: main
func main() {
	config := sarama.NewConfig()
	config.Producer.Compression = sarama.CompressionSnappy

	flag.StringVar(&kafkaBrokers, "brokers", "localhost:9092", "The kafka broker addresses")
	flag.Parse()

	brokers := strings.Split(kafkaBrokers, ",")

	producer, err := sarama.NewAsyncProducer(brokers, config)
	if err != nil {
		log.Fatal("Can't create the Kafka producer")
	}
	fmt.Println("Connected to Kafka brokers", "["+kafkaBrokers+"]")

	ifaces, err := net.Interfaces()
	if err != nil {
		log.Fatal("Cannot get network interfaces")
	}
	for _, iface := range ifaces {
		addrs, _ := iface.Addrs()
		if iface.Name != "lo" && len(addrs) > 0 {
			fmt.Printf("Starting live capture on %s interface...", iface.Name)
			decodePackets(iface.Name, producer)
		}
	}
}
Example 10: handler
func handler(w http.ResponseWriter, r *http.Request) {
	decoder := json.NewDecoder(r.Body)
	var request Request
	err := decoder.Decode(&request)
	if err != nil {
		log.Print("Could not decode request")
		http.Error(w, err.Error(), 500)
		return
	}
	log.Print("Received request for kind: ", request.Kind)

	config := sarama.NewConfig()
	producer, err := sarama.NewAsyncProducer(KafkaAddresses, config)
	if err != nil {
		log.Print("Could not connect to Kafka: ", err)
		http.Error(w, err.Error(), 500)
		return
	}
	log.Print("Connected to Kafka")

	message := sarama.ProducerMessage{
		Topic: request.Kind,
		Value: MapEncoder(request.Data),
	}
	producer.Input() <- &message
	log.Print("Message sent")
	fmt.Fprintf(w, "OK")
}
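One caveat in Example 10: it dials Kafka on every HTTP request. In practice the producer is usually created once and shared across handlers; a minimal sketch of that variant, reusing the example's KafkaAddresses (the variable and function names here are assumptions):

var kafkaProducer sarama.AsyncProducer // created once, shared by all handlers

func initKafkaProducer() error {
	var err error
	kafkaProducer, err = sarama.NewAsyncProducer(KafkaAddresses, sarama.NewConfig())
	return err
}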
Example 11: main
func main() {
	producer, err := sarama.NewAsyncProducer([]string{"10.3.10.32:9091"}, nil)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err = producer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	var enqueued, errors int
ProducerLoop:
	for {
		select {
		case producer.Input() <- &sarama.ProducerMessage{Topic: "dataman_test", Key: nil, Value: sarama.StringEncoder("testing 123")}:
			enqueued++
		case err = <-producer.Errors():
			log.Println("Failed to produce message", err)
			errors++
		case <-signals:
			break ProducerLoop
		}
	}

	log.Printf("Enqueued: %d; errors: %d\n", enqueued, errors)
}
Example 12: NewKafkaProducer
func NewKafkaProducer() (*IndeedKafkaProducer, error) {
	config := sarama.NewConfig()
	config.ClientID = ipresolver.GetLocalAddr()
	config.Producer.RequiredAcks = sarama.WaitForLocal
	config.Producer.Compression = sarama.CompressionNone
	config.Producer.Return.Successes = true
	config.Producer.Return.Errors = true
	config.Producer.Partitioner = sarama.NewHashPartitioner

	asyncProducer, err := sarama.NewAsyncProducer(eatonconfig.KafkaServers, config)
	if err != nil {
		return nil, err
	}
	go func() {
		for msg := range asyncProducer.Successes() {
			eatonevents.Info(fmt.Sprintf("Successfully sent message to topic %s with key %s", msg.Topic, msg.Key))
		}
	}()
	go func() {
		for err := range asyncProducer.Errors() {
			eatonevents.Error("Failed to send message due to error: ", err)
		}
	}()
	return &IndeedKafkaProducer{
		producer: asyncProducer,
	}, nil
}
Example 13: NewKafka
func NewKafka(numberOfMessages int, testLatency bool) *Kafka {
	config := sarama.NewConfig()
	client, _ := sarama.NewClient([]string{"localhost:9092"}, config)

	topic := "test"
	pub, _ := sarama.NewAsyncProducer([]string{"localhost:9092"}, config)
	consumer, _ := sarama.NewConsumerFromClient(client)
	sub, _ := consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)

	var handler benchmark.MessageHandler
	if testLatency {
		handler = &benchmark.LatencyMessageHandler{
			NumberOfMessages: numberOfMessages,
			Latencies:        []float32{},
		}
	} else {
		handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages}
	}

	return &Kafka{
		handler: handler,
		client:  client,
		pub:     pub,
		sub:     sub,
		topic:   topic,
	}
}
Example 14: queueInit
func queueInit() {
	config := sarama.NewConfig()
	config.ClientID = args.ID

	// Acks
	if args.Pub.Ack {
		config.Producer.RequiredAcks = sarama.WaitForAll
	} else {
		config.Producer.RequiredAcks = sarama.WaitForLocal
	}

	// Compress
	if args.Pub.Compress {
		config.Producer.Compression = sarama.CompressionSnappy
	} else {
		config.Producer.Compression = sarama.CompressionNone
	}

	// Flush Intervals
	if args.Pub.FlushFreq > 0 {
		config.Producer.Flush.Frequency = time.Duration(args.Pub.FlushFreq) * time.Second
	} else {
		config.Producer.Flush.Frequency = 1 * time.Second
	}

	producer, err := sarama.NewAsyncProducer(args.Pub.URI, config)
	if err != nil {
		log.Fatalln("Failed to start Kafka producer:", err)
	}

	qProducer = producer
}
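queueInit also leaves Producer.Return.Errors at its default of true without reading the channel; a minimal companion sketch (the function name is an assumption) that queueInit could launch with go watchQueueErrors() once qProducer is set:

func watchQueueErrors() {
	// Return.Errors defaults to true, so this channel must be read
	// or a failed publish can eventually wedge the producer.
	for err := range qProducer.Errors() {
		log.Println("Failed to publish:", err)
	}
}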
Example 15: NewKafkaOutput
// NewKafkaOutput creates an instance of the Kafka producer client.
func NewKafkaOutput(address string, config *KafkaConfig) io.Writer {
	c := sarama.NewConfig()
	c.Producer.RequiredAcks = sarama.WaitForLocal
	c.Producer.Compression = sarama.CompressionSnappy
	c.Producer.Flush.Frequency = KafkaOutputFrequency * time.Millisecond

	brokerList := strings.Split(config.host, ",")

	producer, err := sarama.NewAsyncProducer(brokerList, c)
	if err != nil {
		log.Fatalln("Failed to start Sarama (Kafka) producer:", err)
	}

	o := &KafkaOutput{
		config:   config,
		producer: producer,
	}

	if Settings.verbose {
		// Start an infinite loop tracking errors from the kafka producer.
		go o.ErrorHandler()
	}

	return o
}
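NewKafkaOutput returns an io.Writer, but the Write method is outside this excerpt. A hypothetical implementation (the topic field on KafkaConfig is an assumption) would push each payload into the async producer:

func (o *KafkaOutput) Write(data []byte) (n int, err error) {
	// ByteEncoder keeps a reference to data, so callers that reuse
	// the buffer should pass a copy instead.
	o.producer.Input() <- &sarama.ProducerMessage{
		Topic: o.config.topic, // assumed field
		Value: sarama.ByteEncoder(data),
	}
	return len(data), nil
}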