本文整理汇总了Golang中github.com/Shopify/sarama.NewConsumer函数的典型用法代码示例。如果您正苦于以下问题:Golang NewConsumer函数的具体用法?Golang NewConsumer怎么用?Golang NewConsumer使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewConsumer函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: NewPeer
// NewPeer creates and returns a new Peer for communicating with Kafka.
// The supplied host's port (if any) is replaced with the default Kafka
// port 9092. On any construction failure, resources created so far are
// closed before returning the error.
func NewPeer(host string) (*Peer, error) {
	// Keep only the hostname and force the standard Kafka port.
	host = strings.Split(host, ":")[0] + ":9092"
	config := sarama.NewConfig()
	client, err := sarama.NewClient([]string{host}, config)
	if err != nil {
		return nil, err
	}
	producer, err := sarama.NewAsyncProducer([]string{host}, config)
	if err != nil {
		client.Close() // don't leak the client on partial construction
		return nil, err
	}
	consumer, err := sarama.NewConsumer([]string{host}, config)
	if err != nil {
		producer.Close()
		client.Close()
		return nil, err
	}
	// NOTE(review): `topic` is a package-level identifier defined elsewhere.
	partitionConsumer, err := consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
	if err != nil {
		consumer.Close()
		producer.Close()
		client.Close()
		return nil, err
	}
	return &Peer{
		client:   client,
		producer: producer,
		consumer: partitionConsumer,
		send:     make(chan []byte),
		errors:   make(chan error, 1),
		done:     make(chan bool),
	}, nil
}
示例2: NewKafka
// NewKafka builds a Kafka benchmark peer with a dedicated publish and
// subscribe client against a local broker. Connection failures panic
// immediately instead of being silently discarded (the original code
// dropped every error with `_`, deferring the failure to an obscure
// nil-pointer crash later in the benchmark).
func NewKafka(numberOfMessages int, testLatency bool) *Kafka {
	pubClient, err := sarama.NewClient("pub", []string{"localhost:9092"}, sarama.NewClientConfig())
	if err != nil {
		panic(err)
	}
	subClient, err := sarama.NewClient("sub", []string{"localhost:9092"}, sarama.NewClientConfig())
	if err != nil {
		panic(err)
	}
	topic := "test"
	pub, err := sarama.NewProducer(pubClient, sarama.NewProducerConfig())
	if err != nil {
		panic(err)
	}
	consumerConfig := sarama.NewConsumerConfig()
	consumerConfig.OffsetMethod = sarama.OffsetMethodNewest // Only read new messages
	consumerConfig.DefaultFetchSize = 10 * 1024 * 1024
	sub, err := sarama.NewConsumer(subClient, topic, 0, "test", consumerConfig)
	if err != nil {
		panic(err)
	}
	// Latency handler records per-message latencies; throughput handler
	// only counts messages.
	var handler benchmark.MessageHandler
	if testLatency {
		handler = &benchmark.LatencyMessageHandler{
			NumberOfMessages: numberOfMessages,
			Latencies:        []float32{},
		}
	} else {
		handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages}
	}
	return &Kafka{
		handler:   handler,
		pubClient: pubClient,
		subClient: subClient,
		pub:       pub,
		sub:       sub,
		topic:     topic,
	}
}
示例3: main
// Connects to a local Kafka broker, consumes my_topic/partition 0 until
// five seconds pass without an event, then reports how many messages
// were received.
func main() {
	client, err := kafka.NewClient("my_client", []string{"localhost:9092"}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("> connected")
	defer client.Close()

	consumer, err := kafka.NewConsumer(client, "my_topic", 0, "my_consumer_group", kafka.NewConsumerConfig())
	if err != nil {
		panic(err)
	}
	fmt.Println("> consumer ready")
	defer consumer.Close()

	received := 0
recvLoop:
	for {
		select {
		case ev := <-consumer.Events():
			if ev.Err != nil {
				panic(ev.Err)
			}
			received++
		case <-time.After(5 * time.Second):
			// No event for five seconds: assume the stream is done.
			fmt.Println("> timed out")
			break recvLoop
		}
	}
	fmt.Println("Got", received, "messages.")
}
示例4: NewKafkaSubscriber
// NewKafkaSubscriber initiates the experimental Kafka consumer.
// It validates the config (at least one broker host and a topic are
// required) and wires the offset provider/broadcast callbacks into the
// returned subscriber.
func NewKafkaSubscriber(cfg *config.Kafka, offsetProvider func() int64, offsetBroadcast func(int64)) (*KafkaSubscriber, error) {
	sub := &KafkaSubscriber{
		offset:          offsetProvider,
		broadcastOffset: offsetBroadcast,
		partition:       cfg.Partition,
		stop:            make(chan chan error, 1),
	}
	switch {
	case len(cfg.BrokerHosts) == 0:
		return sub, errors.New("at least 1 broker host is required")
	case len(cfg.Topic) == 0:
		return sub, errors.New("topic name is required")
	}
	sub.topic = cfg.Topic

	sconf := sarama.NewConfig()
	// Surface consumer errors on the Errors() channel instead of only logging.
	sconf.Consumer.Return.Errors = true
	var err error
	sub.cnsmr, err = sarama.NewConsumer(cfg.BrokerHosts, sconf)
	return sub, err
}
示例5: NewPartitionConsumer
// NewPartitionConsumer creates a new partition consumer instance.
// The fetch settings are copied from the group configuration; the
// starting offset defaults to the oldest available message unless the
// group has a stored offset for this partition, in which case reading
// resumes from there.
func NewPartitionConsumer(group *ConsumerGroup, partition int32) (*PartitionConsumer, error) {
	cfg := sarama.ConsumerConfig{
		DefaultFetchSize: group.config.DefaultFetchSize,
		EventBufferSize:  group.config.EventBufferSize,
		MaxMessageSize:   group.config.MaxMessageSize,
		MaxWaitTime:      group.config.MaxWaitTime,
		MinFetchSize:     group.config.MinFetchSize,
		OffsetMethod:     sarama.OffsetMethodOldest,
	}
	offset, err := group.Offset(partition)
	switch {
	case err != nil:
		return nil, err
	case offset > 0:
		// A committed offset exists: resume exactly there.
		cfg.OffsetMethod = sarama.OffsetMethodManual
		cfg.OffsetValue = offset
	}
	stream, err := sarama.NewConsumer(group.client, group.topic, partition, group.name, &cfg)
	if err != nil {
		return nil, err
	}
	pc := &PartitionConsumer{
		stream:    stream,
		topic:     group.topic,
		partition: partition,
	}
	return pc, nil
}
示例6: main
// Consumes partition 0 of the given topic from the newest offset and
// counts messages until one with the value "THE END" arrives.
func main() {
	brokers := flag.String("brokers", "localhost:9093", "Comma separated kafka brokers list")
	topic := flag.String("topic", "my-topic", "Kafka topic to send messages to")
	flag.Parse()

	logger := log.New(os.Stdout, "consumer ", log.Lmicroseconds)

	consumer, err := sarama.NewConsumer(strings.Split(*brokers, ","), nil)
	if err != nil {
		logger.Panicln(err)
	}
	defer func() {
		if err := consumer.Close(); err != nil {
			logger.Fatalln(err)
		}
	}()

	partitionConsumer, err := consumer.ConsumePartition(*topic, 0, sarama.OffsetNewest)
	if err != nil {
		logger.Panicln(err)
	}

	logger.Println("Start")
	count := 0
	for msg := range partitionConsumer.Messages() {
		// Sentinel value signals the end of the stream.
		if string(msg.Value) == "THE END" {
			break
		}
		count++
	}
	logger.Printf("Finished. Received %d messages.\n", count)
}
示例7: main
// Tails the "received" topic (partition 0) and prints the first rune of
// every event value to stdout; status messages go to stderr.
func main() {
	client, err := sarama.NewClient("a_logger_for_mhub", []string{"localhost:9092"}, nil)
	if err != nil {
		panic(err)
	}
	os.Stderr.WriteString("> connected\n")
	defer client.Close()

	consumer, err := sarama.NewConsumer(client, "received", 0, "", nil)
	if err != nil {
		panic(err)
	}
	os.Stderr.WriteString("> consumer ready\n")
	defer consumer.Close()

	// Range over the event channel instead of the original single-case
	// select inside `for`: if Events() is ever closed, the select form
	// spins forever receiving zero-value events, while range exits the
	// loop cleanly on close.
	for event := range consumer.Events() {
		if event.Err != nil {
			panic(event.Err)
		}
		fmt.Println(utf8.FullRune(event.Value))
	}
}
示例8: main
// Starts one partition consumer per partition of `topic`, dispatching
// each message stream to the ProcessFunc selected by the package-level
// `op` flag, and shuts everything down on SIGINT.
func main() {
	flag.Parse()

	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true
	consumer, err := sarama.NewConsumer(brokers, config)
	if err != nil {
		log.Fatalln(err)
	}
	defer func() {
		if err := consumer.Close(); err != nil {
			panic(err)
		}
	}()

	// Pick the arithmetic handler matching the requested operator.
	var pf ProcessFunc
	switch op {
	case "+":
		pf = processAdd
	case "-":
		pf = processSub
	case "*":
		pf = processMul
	case "/":
		pf = processDiv
	}

	// Set up one partition consumer for each partition.
	partitions, err := consumer.Partitions(topic)
	if err != nil {
		log.Fatalln(err)
	}
	partitionConsumers := make([]sarama.PartitionConsumer, len(partitions))
	for i, partition := range partitions {
		pc, err := consumer.ConsumePartition(topic, partition, sarama.OffsetNewest)
		if err != nil {
			log.Fatalln(err)
		}
		partitionConsumers[i] = pc
		// One goroutine serves messages, a second drains errors.
		go func(pc sarama.PartitionConsumer) {
			Serve(pc.Messages(), pf)
		}(pc)
		go func(pc sarama.PartitionConsumer) {
			for err := range pc.Errors() {
				log.Println(err)
			}
		}(pc)
	}

	// Block until interrupted, then close all partition consumers.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	<-signals
	for _, pc := range partitionConsumers {
		fmt.Println("Closing partition, next offset", pc.HighWaterMarkOffset())
		pc.AsyncClose()
	}
}
示例9: main
// Consumes the "buy" topic (partition 0) and logs a notification for
// each decoded order until interrupted.
func main() {
	config := sarama.NewConfig()
	// Handle errors manually via the Errors() channel.
	config.Consumer.Return.Errors = true
	consumer, err := sarama.NewConsumer([]string{kafkaAddr}, config)
	if err != nil {
		panic(err)
	}
	defer consumer.Close()

	logConsumer, err := consumer.ConsumePartition("buy", 0, sarama.OffsetNewest)
	if err != nil {
		panic(err)
	}
	defer logConsumer.Close()

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	for {
		select {
		case err := <-logConsumer.Errors():
			log.Println(err)
		case msg := <-logConsumer.Messages():
			order := &Order{}
			// Check the decode error (it was silently ignored before,
			// which logged a zero-value Order for malformed payloads).
			if err := json.Unmarshal(msg.Value, order); err != nil {
				log.Println("failed to decode order:", err)
				continue
			}
			log.Printf("notification to %s with order %s", order.UserID, order.OrderID)
		case <-signals:
			return
		}
	}
}
示例10: newConsumer
// newConsumer connects a master consumer to the package-level `kafkas`
// brokers and opens one partition consumer per partition of `topic`,
// retrying the whole sequence via the package-level retry helper.
func newConsumer() (masterConsumer kafka.Consumer, consumers []kafka.PartitionConsumer) {
	config := kafka.NewConfig()
	config.Net.KeepAlive = 30 * time.Second
	config.Consumer.Retry.Backoff = 25 * time.Millisecond

	retry(func() (err error) {
		// Reset on every attempt: previously a failed attempt left its
		// partial partition consumers in the slice, so a successful
		// retry returned stale/duplicate consumers.
		consumers = make([]kafka.PartitionConsumer, 0)

		masterConsumer, err = kafka.NewConsumer(kafkas, config)
		if err != nil {
			return
		}
		var partitions []int32
		partitions, err = masterConsumer.Partitions(topic)
		if err != nil {
			return
		}
		for _, partition := range partitions {
			var pc kafka.PartitionConsumer
			pc, err = masterConsumer.ConsumePartition(topic, partition, kafka.OffsetNewest)
			if err != nil {
				return
			}
			consumers = append(consumers, pc)
		}
		return
	})
	return
}
示例11: Setup
// Setup prepares the Requester for benchmarking: it creates an async
// producer, a consumer, and a partition consumer on k.topic, cleaning
// up already-created resources if a later step fails.
func (k *kafkaRequester) Setup() error {
	config := sarama.NewConfig()
	producer, err := sarama.NewAsyncProducer(k.urls, config)
	if err != nil {
		return err
	}
	// Share the same config with the consumer. The original passed nil
	// here, silently ignoring the configuration used for the producer.
	consumer, err := sarama.NewConsumer(k.urls, config)
	if err != nil {
		producer.Close()
		return err
	}
	partitionConsumer, err := consumer.ConsumePartition(k.topic, 0, sarama.OffsetNewest)
	if err != nil {
		producer.Close()
		consumer.Close()
		return err
	}

	k.producer = producer
	k.consumer = consumer
	k.partitionConsumer = partitionConsumer
	// Pre-build the message once; the payload is a zero-filled buffer of
	// the configured size.
	k.msg = &sarama.ProducerMessage{
		Topic: k.topic,
		Value: sarama.ByteEncoder(make([]byte, k.payloadSize)),
	}
	return nil
}
示例12: tailPartitions
// tailPartitions starts one consumer per partition at the package-level
// manual offset and logs every event until the consumers stop; it blocks
// until all tail goroutines finish.
func tailPartitions(client *sarama.Client, topic string, partitions []int32) {
	var wg sync.WaitGroup
	wg.Add(len(partitions))

	tail := func(partition int32) {
		defer wg.Done()

		cfg := sarama.NewConsumerConfig()
		cfg.OffsetMethod = sarama.OffsetMethodManual
		cfg.OffsetValue = offset // package-level starting offset
		c, err := sarama.NewConsumer(client, topic, partition, "", cfg)
		if err != nil {
			logger.Fatalf("err creating consumer: %s", err)
		}
		defer c.Close()

		for ev := range c.Events() {
			logger.Printf("partition=%d offset=%d key=%s value=%s", ev.Partition, ev.Offset, ev.Key, ev.Value)
		}
	}

	for _, p := range partitions {
		go tail(p)
	}
	wg.Wait()
}
示例13: newTestConsumer
// newTestConsumer connects a sarama consumer to the test Kafka host,
// failing the test immediately if the connection cannot be made.
func newTestConsumer(t *testing.T) sarama.Consumer {
	c, err := sarama.NewConsumer([]string{getTestKafkaHost()}, nil)
	if err != nil {
		t.Fatal(err)
	}
	return c
}
示例14: newKafkaConsumer
// newKafkaConsumer creates a Kafka consumer (backed by
// github.com/Shopify/sarama) for the single given broker.
func newKafkaConsumer(broker string) (kafka.Consumer, error) {
	cfg := kafka.NewConfig()
	c, err := kafka.NewConsumer([]string{broker}, cfg)
	if err != nil {
		return nil, err
	}
	return c, nil
}
示例15: NewConsumer
// NewConsumer connects a sarama consumer to the given broker addresses
// and stores it on the client. On failure the existing c.Consumer is
// left untouched (previously it was overwritten with nil, clobbering
// any working consumer already attached to the client).
func (c *KafkaClient) NewConsumer(hostports []string) error {
	consumer, err := sarama.NewConsumer(hostports, nil)
	if err != nil {
		log.Printf("[kafka] new a consumer %+v error, %s\n", hostports, err)
		return err
	}
	log.Printf("[kafka] new a consumer %+v success.\n", hostports)
	c.Consumer = consumer
	return nil
}