本文整理汇总了Golang中github.com/Shopify/sarama.NewConfig函数的典型用法代码示例。如果您正苦于以下问题:Golang NewConfig函数的具体用法?Golang NewConfig怎么用?Golang NewConfig使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewConfig函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: newConsumer
// newConsumer creates a master consumer against the configured brokers and
// one PartitionConsumer per partition of the topic, retrying the whole setup
// until it succeeds.
func newConsumer() (masterConsumer kafka.Consumer, consumers []kafka.PartitionConsumer) {
	config := kafka.NewConfig()
	config.Net.KeepAlive = 30 * time.Second
	config.Consumer.Retry.Backoff = 25 * time.Millisecond

	retry(func() (err error) {
		// Start from a clean slate on every attempt: previously, partition
		// consumers appended during a failed attempt stayed in the slice,
		// so a retry produced duplicate (and stale) consumers.
		consumers = make([]kafka.PartitionConsumer, 0)

		masterConsumer, err = kafka.NewConsumer(kafkas, config)
		if err != nil {
			return
		}

		var partitions []int32
		partitions, err = masterConsumer.Partitions(topic)
		if err != nil {
			return
		}

		for _, partition := range partitions {
			var consumer kafka.PartitionConsumer
			consumer, err = masterConsumer.ConsumePartition(topic, partition, kafka.OffsetNewest)
			if err != nil {
				return
			}
			consumers = append(consumers, consumer)
		}
		return
	})
	return
}
示例2: TestProducerReturnsExpectationsToChannels
// TestProducerReturnsExpectationsToChannels checks that the mock async
// producer routes two expected successes to Successes() and one expected
// failure to Errors(), in FIFO order.
func TestProducerReturnsExpectationsToChannels(t *testing.T) {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true
	mp := NewAsyncProducer(t, config)

	mp.ExpectInputAndSucceed()
	mp.ExpectInputAndSucceed()
	mp.ExpectInputAndFail(sarama.ErrOutOfBrokers)

	for _, topic := range []string{"test 1", "test 2", "test 3"} {
		mp.Input() <- &sarama.ProducerMessage{Topic: topic}
	}

	first := <-mp.Successes()
	second := <-mp.Successes()
	failure := <-mp.Errors()

	if first.Topic != "test 1" {
		t.Error("Expected message 1 to be returned first")
	}
	if second.Topic != "test 2" {
		t.Error("Expected message 2 to be returned second")
	}
	if failure.Msg.Topic != "test 3" || failure.Err != sarama.ErrOutOfBrokers {
		t.Error("Expected message 3 to be returned as error")
	}

	if err := mp.Close(); err != nil {
		t.Error(err)
	}
}
示例3: pubKafkaLoop
// pubKafkaLoop publishes `loops` messages of `sz` bytes each to the package
// topic through a synchronous producer, counting each send as "ok" or "fail".
// seq identifies the calling goroutine but is not used in the loop body.
func pubKafkaLoop(seq int) {
	conf := sarama.NewConfig()
	conf.Producer.RequiredAcks = sarama.WaitForLocal
	conf.Producer.Partitioner = sarama.NewHashPartitioner
	conf.Producer.Timeout = time.Second
	//conf.Producer.Compression = sarama.CompressionSnappy
	conf.Producer.Retry.Max = 3

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, conf)
	if err != nil {
		stress.IncCounter("fail", 1)
		log.Println(err)
		return
	}
	defer producer.Close()

	// The payload never changes, so encode it once up front.
	payload := sarama.StringEncoder(strings.Repeat("X", sz))
	for n := 0; n < loops; n++ {
		_, _, sendErr := producer.SendMessage(&sarama.ProducerMessage{
			Topic: topic,
			Value: payload,
		})
		if sendErr != nil {
			stress.IncCounter("fail", 1)
		} else {
			stress.IncCounter("ok", 1)
		}
	}
}
示例4: consumeCluster
// consumeCluster spawns one simple consumer goroutine per topic matching
// topicPattern in the given cluster, feeding messages into msgChan.
// It returns silently if the cluster has no brokers or the client/topic
// lookup fails (errors are reported through the UI).
func (this *Peek) consumeCluster(zkcluster *zk.ZkCluster, topicPattern string,
	partitionId int, msgChan chan *sarama.ConsumerMessage) {
	brokers := zkcluster.BrokerList()
	if len(brokers) == 0 {
		return
	}

	kfk, err := sarama.NewClient(brokers, sarama.NewConfig())
	if err != nil {
		this.Ui.Output(err.Error())
		return
	}
	//defer kfk.Close() // FIXME how to close it

	topics, err := kfk.Topics()
	if err != nil {
		this.Ui.Output(err.Error())
		return
	}

	for _, t := range topics {
		if !patternMatched(t, topicPattern) {
			continue
		}
		go this.simpleConsumeTopic(zkcluster, kfk, t, int32(partitionId), msgChan)
	}
}
示例5: NewClient
// NewClient returns a Kafka client connected to the given broker addresses.
// When the cluster is unreachable (ErrOutOfBrokers) it retries up to
// outOfBrokersRetries additional times, sleeping outOfBrokersBackoff between
// attempts; any other error (or success) ends the loop immediately.
func NewClient(addresses []string) (sarama.Client, error) {
	config := sarama.NewConfig()

	// Use the local hostname as the client ID; fall back to empty on failure.
	hostname, err := os.Hostname()
	if err != nil {
		hostname = ""
	}
	config.ClientID = hostname
	config.Producer.Compression = sarama.CompressionSnappy
	config.Producer.Return.Successes = true

	var client sarama.Client
	for retries := outOfBrokersRetries; retries >= 0; retries-- {
		client, err = sarama.NewClient(addresses, config)
		if err != sarama.ErrOutOfBrokers {
			break
		}
		glog.Errorf("Can't connect to the Kafka cluster at %s (%d retries left): %s",
			addresses, retries, err)
		time.Sleep(outOfBrokersBackoff)
	}
	return client, err
}
示例6: makePub
// makePub builds an async producer tuned for mirroring into cluster c2:
// batched flushes, bounded retries, and generous network timeouts.
func (this *Mirror) makePub(c2 *zk.ZkCluster) (sarama.AsyncProducer, error) {
	cf := sarama.NewConfig()

	// Metadata refresh and retry policy.
	cf.Metadata.RefreshFrequency = time.Minute * 10
	cf.Metadata.Retry.Max = 3
	cf.Metadata.Retry.Backoff = time.Second * 3

	cf.ChannelBufferSize = 1000

	// Batching: 2000 messages per batch, flushed every second, with no cap
	// on messages per broker request.
	cf.Producer.Return.Errors = true
	cf.Producer.Flush.Messages = 2000
	cf.Producer.Flush.Frequency = time.Second
	cf.Producer.Flush.MaxMessages = 0
	cf.Producer.RequiredAcks = sarama.WaitForLocal
	cf.Producer.Retry.Backoff = time.Second * 4
	cf.Producer.Retry.Max = 3

	// Network timeouts.
	cf.Net.DialTimeout = time.Second * 30
	cf.Net.WriteTimeout = time.Second * 30
	cf.Net.ReadTimeout = time.Second * 30

	// Optional payload compression; any other value means no compression.
	if this.Compress == "gzip" {
		cf.Producer.Compression = sarama.CompressionGZIP
	} else if this.Compress == "snappy" {
		cf.Producer.Compression = sarama.CompressionSnappy
	}

	return sarama.NewAsyncProducer(c2.BrokerList(), cf)
}
示例7: main
// main connects a snappy-compressed async producer to the brokers given by
// the -brokers flag, then starts a live packet capture on every non-loopback
// interface that has at least one address.
func main() {
	config := sarama.NewConfig()
	config.Producer.Compression = sarama.CompressionSnappy

	flag.StringVar(&kafkaBrokers, "brokers", "localhost:9092", "The kafka broker addresses")
	flag.Parse()

	// strings.Split already yields the slice we need; the old element-by-
	// element copy loop was redundant.
	brokers := strings.Split(kafkaBrokers, ",")

	producer, err := sarama.NewAsyncProducer(brokers, config)
	if err != nil {
		// Include the underlying error so connection failures are diagnosable
		// (the original message discarded it).
		log.Fatal("Can't create the Kafka producer: ", err)
	}
	fmt.Println("Connected to Kafka brokers", "["+kafkaBrokers+"]")

	ifaces, err := net.Interfaces()
	if err != nil {
		log.Fatal("Cannot get network interfaces")
	}
	for _, iface := range ifaces {
		addrs, _ := iface.Addrs()
		if iface.Name != "lo" && len(addrs) > 0 {
			fmt.Printf("Starting live capture on %s interface...", iface.Name)
			decodePackets(iface.Name, producer)
		}
	}
}
示例8: kafkaClient
// kafkaClient initializes a connection to a Kafka cluster and
// initializes one or more clientProducer() (producer instances).
// In noop mode no connection is made at all; dummy producers just generate
// messages and burn CPU. Either way it blocks until killClients fires.
func kafkaClient(n int) {
	if noop {
		// Noop: skip Kafka entirely.
		for i := 0; i < producers; i++ {
			go clientDummyProducer()
		}
	} else {
		clientID := "client_" + strconv.Itoa(n)

		conf := kafka.NewConfig()
		if compression != kafka.CompressionNone {
			conf.Producer.Compression = compression
		}
		conf.Producer.Flush.MaxMessages = batchSize

		client, err := kafka.NewClient(brokers, conf)
		if err != nil {
			log.Println(err)
			os.Exit(1)
		}
		log.Printf("%s connected\n", clientID)

		for i := 0; i < producers; i++ {
			go clientProducer(client)
		}
	}
	<-killClients
}
示例9: main
// main publishes a single message to the "important" topic through a
// synchronous producer requiring acks from all replicas, then prints where
// the message landed.
func main() {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Retry.Max = 5

	// brokers := []string{"192.168.59.103:9092"}
	brokers := []string{"localhost:9092"}
	producer, err := sarama.NewSyncProducer(brokers, config)
	if err != nil {
		// Should not reach here
		panic(err)
	}
	defer func() {
		// Should not reach here
		if closeErr := producer.Close(); closeErr != nil {
			panic(closeErr)
		}
	}()

	topic := "important"
	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: topic,
		Value: sarama.StringEncoder("Something Cool"),
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("Message is stored in topic(%s)/partition(%d)/offset(%d)\n", topic, partition, offset)
}
示例10: NewKafka
// NewKafka returns a benchmark peer wired to a local broker: an async
// producer publishing to the "test" topic and a partition consumer reading
// partition 0 from the newest offset. The handler tracks either latency or
// throughput depending on testLatency.
func NewKafka(numberOfMessages int, testLatency bool) *Kafka {
	const topic = "test"
	brokers := []string{"localhost:9092"}

	config := sarama.NewConfig()

	// The original code discarded every constructor error with `_`, which
	// surfaced later as nil-pointer dereferences; fail fast instead.
	client, err := sarama.NewClient(brokers, config)
	if err != nil {
		panic(err)
	}
	pub, err := sarama.NewAsyncProducer(brokers, config)
	if err != nil {
		panic(err)
	}
	consumer, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		panic(err)
	}
	sub, err := consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
	if err != nil {
		panic(err)
	}

	var handler benchmark.MessageHandler
	if testLatency {
		handler = &benchmark.LatencyMessageHandler{
			NumberOfMessages: numberOfMessages,
			Latencies:        []float32{},
		}
	} else {
		handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages}
	}

	return &Kafka{
		handler: handler,
		client:  client,
		pub:     pub,
		sub:     sub,
		topic:   topic,
	}
}
示例11: NewKafkaProducer
// NewKafkaProducer builds an IndeedKafkaProducer around an async sarama
// producer and spawns two goroutines that drain its success and error
// channels, logging each outcome.
func NewKafkaProducer() (*IndeedKafkaProducer, error) {
	config := sarama.NewConfig()
	config.ClientID = ipresolver.GetLocalAddr()
	config.Producer.RequiredAcks = sarama.WaitForLocal
	config.Producer.Compression = sarama.CompressionNone
	config.Producer.Return.Successes = true
	config.Producer.Return.Errors = true
	config.Producer.Partitioner = sarama.NewHashPartitioner

	producer, err := sarama.NewAsyncProducer(eatonconfig.KafkaServers, config)
	if err != nil {
		return nil, err
	}

	// Both feedback channels must be drained or the producer will block.
	go func() {
		for msg := range producer.Successes() {
			eatonevents.Info(fmt.Sprintf("Successfully sent message to topic %s with key %s", msg.Topic, msg.Key))
		}
	}()
	go func() {
		for err := range producer.Errors() {
			eatonevents.Error("Failed to send message due to error: ", err)
		}
	}()

	return &IndeedKafkaProducer{producer: producer}, nil
}
示例12: queueInit
// queueInit configures and starts the global Kafka async producer
// (qProducer) from the parsed command-line args: required acks,
// compression, and flush frequency.
func queueInit() {
	config := sarama.NewConfig()
	config.ClientID = args.ID

	// Acks: all in-sync replicas, or just the local broker.
	config.Producer.RequiredAcks = sarama.WaitForLocal
	if args.Pub.Ack {
		config.Producer.RequiredAcks = sarama.WaitForAll
	}

	// Compress
	config.Producer.Compression = sarama.CompressionNone
	if args.Pub.Compress {
		config.Producer.Compression = sarama.CompressionSnappy
	}

	// Flush interval, defaulting to one second.
	config.Producer.Flush.Frequency = 1 * time.Second
	if args.Pub.FlushFreq > 0 {
		config.Producer.Flush.Frequency = time.Duration(args.Pub.FlushFreq) * time.Second
	}

	producer, err := sarama.NewAsyncProducer(args.Pub.URI, config)
	if err != nil {
		log.Fatalln("Failed to start Kafka producer:", err)
	}
	qProducer = producer
}
示例13: main
// main wires one PartitionConsumer per partition of the topic to the
// arithmetic processor selected by op, then blocks until interrupted,
// finally closing each partition consumer.
func main() {
	flag.Parse()

	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true
	consumer, err := sarama.NewConsumer(brokers, config)
	if err != nil {
		log.Fatalln(err)
	}
	defer func() {
		if err := consumer.Close(); err != nil {
			panic(err)
		}
	}()

	// Select the processing function for the requested operator.
	var pf ProcessFunc
	switch op {
	case "+":
		pf = processAdd
	case "-":
		pf = processSub
	case "*":
		pf = processMul
	case "/":
		pf = processDiv
	default:
		// Previously an unrecognized operator left pf nil and crashed
		// later inside Serve; fail fast with a clear message instead.
		log.Fatalln("unsupported operation:", op)
	}

	// Set up one partition_consumer for each partition
	partitions, err := consumer.Partitions(topic)
	if err != nil {
		log.Fatalln(err)
	}
	partitionConsumers := make([]sarama.PartitionConsumer, len(partitions))
	for idx, partition := range partitions {
		pc, err := consumer.ConsumePartition(topic, partition, sarama.OffsetNewest)
		if err != nil {
			log.Fatalln(err)
		}
		partitionConsumers[idx] = pc
		// One goroutine serves messages, another drains the error channel.
		go func(pc sarama.PartitionConsumer) {
			Serve(pc.Messages(), pf)
		}(pc)
		go func(pc sarama.PartitionConsumer) {
			for err := range pc.Errors() {
				log.Println(err)
			}
		}(pc)
	}

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	<-signals

	for _, pc := range partitionConsumers {
		fmt.Println("Closing partition, next offset", pc.HighWaterMarkOffset())
		pc.AsyncClose()
	}
}
示例14: main
// main consumes partition 0 of the "buy" topic from the newest offset,
// decoding each message as an Order and logging a notification, until
// interrupted.
func main() {
	config := sarama.NewConfig()
	// Handle errors manually
	config.Consumer.Return.Errors = true

	consumer, err := sarama.NewConsumer([]string{kafkaAddr}, config)
	if err != nil {
		panic(err)
	}
	defer consumer.Close()

	logConsumer, err := consumer.ConsumePartition("buy", 0, sarama.OffsetNewest)
	if err != nil {
		panic(err)
	}
	defer logConsumer.Close()

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	for {
		select {
		case err := <-logConsumer.Errors():
			log.Println(err)
		case msg := <-logConsumer.Messages():
			order := &Order{}
			// The Unmarshal error was previously ignored, so a malformed
			// payload silently produced a zero-value Order notification.
			if err := json.Unmarshal(msg.Value, order); err != nil {
				log.Println("cannot decode order:", err)
				continue
			}
			log.Printf("notification to %s with order %s", order.UserID, order.OrderID)
		case <-signals:
			return
		}
	}
}
示例15: NewKafkaSubscriber
// NewKafkaSubscriber will initiate the experimental Kafka consumer for the
// given topic/partition, using offsetProvider to obtain the starting offset
// and offsetBroadcast to publish offset updates. It validates that the
// config names at least one broker host and a topic.
func NewKafkaSubscriber(cfg *config.Kafka, offsetProvider func() int64, offsetBroadcast func(int64)) (*KafkaSubscriber, error) {
	s := &KafkaSubscriber{
		offset:          offsetProvider,
		broadcastOffset: offsetBroadcast,
		partition:       cfg.Partition,
		stop:            make(chan chan error, 1),
	}

	if len(cfg.BrokerHosts) == 0 {
		return s, errors.New("at least 1 broker host is required")
	}
	if len(cfg.Topic) == 0 {
		return s, errors.New("topic name is required")
	}
	s.topic = cfg.Topic

	sconfig := sarama.NewConfig()
	sconfig.Consumer.Return.Errors = true
	cnsmr, err := sarama.NewConsumer(cfg.BrokerHosts, sconfig)
	s.cnsmr = cnsmr
	return s, err
}