本文整理汇总了Golang中github.com/Shopify/sarama.NewClient函数的典型用法代码示例。如果您正苦于以下问题:Golang NewClient函数的具体用法?Golang NewClient怎么用?Golang NewClient使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewClient函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: NewKafka
// NewKafka builds a benchmark peer backed by Kafka using the legacy sarama
// client/producer/consumer constructors. Errors from sarama are deliberately
// discarded, matching the original example's best-effort setup.
func NewKafka(numberOfMessages int, testLatency bool) *Kafka {
	topic := "test"

	pubClient, _ := sarama.NewClient("pub", []string{"localhost:9092"}, sarama.NewClientConfig())
	subClient, _ := sarama.NewClient("sub", []string{"localhost:9092"}, sarama.NewClientConfig())

	pub, _ := sarama.NewProducer(pubClient, sarama.NewProducerConfig())

	subConf := sarama.NewConsumerConfig()
	subConf.OffsetMethod = sarama.OffsetMethodNewest // Only read new messages
	subConf.DefaultFetchSize = 10 * 1024 * 1024
	sub, _ := sarama.NewConsumer(subClient, topic, 0, "test", subConf)

	var handler benchmark.MessageHandler
	if testLatency {
		handler = &benchmark.LatencyMessageHandler{
			NumberOfMessages: numberOfMessages,
			Latencies:        []float32{},
		}
	} else {
		handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages}
	}

	return &Kafka{
		handler:   handler,
		pubClient: pubClient,
		subClient: subClient,
		pub:       pub,
		sub:       sub,
		topic:     topic,
	}
}
示例2: main
// main tails the "received" topic (partition 0) from a local Kafka broker and
// prints the first rune of each message value via utf8.FullRune.
func main() {
	// nil config → sarama defaults.
	client, err := sarama.NewClient("a_logger_for_mhub", []string{"localhost:9092"}, nil)
	if err != nil {
		panic(err)
	}
	os.Stderr.WriteString("> connected\n")
	defer client.Close()

	consumer, err := sarama.NewConsumer(client, "received", 0, "", nil)
	if err != nil {
		panic(err)
	}
	os.Stderr.WriteString("> consumer ready\n")
	defer consumer.Close()

	// Fix: the original wrapped a single-case select in an infinite for loop
	// (redundant — staticcheck S1000) and would busy-spin on zero-value events
	// once the Events channel closed. Ranging over the channel delivers each
	// event and exits cleanly on close, letting the deferred Closes run.
	for event := range consumer.Events() {
		if event.Err != nil {
			panic(event.Err)
		}
		fmt.Println(utf8.FullRune(event.Value))
	}
}
示例3: NewPeer
// NewPeer creates and returns a new Peer for communicating with Kafka.
// The host's port (if any) is replaced with the standard broker port 9092.
// On any setup failure the resources created so far are closed before
// returning the error (the original leaked them).
func NewPeer(host string) (*Peer, error) {
	host = strings.Split(host, ":")[0] + ":9092"
	config := sarama.NewConfig()
	client, err := sarama.NewClient([]string{host}, config)
	if err != nil {
		return nil, err
	}
	producer, err := sarama.NewAsyncProducer([]string{host}, config)
	if err != nil {
		client.Close() // fix: don't leak the client on this error path
		return nil, err
	}
	consumer, err := sarama.NewConsumer([]string{host}, config)
	if err != nil {
		producer.Close()
		client.Close()
		return nil, err
	}
	// NOTE(review): the parent consumer is not stored in Peer, so it cannot be
	// closed later — only its partition consumer is retained. Confirm whether
	// Peer should also hold the parent consumer.
	partitionConsumer, err := consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
	if err != nil {
		consumer.Close()
		producer.Close()
		client.Close()
		return nil, err
	}
	return &Peer{
		client:   client,
		producer: producer,
		consumer: partitionConsumer,
		send:     make(chan []byte),
		errors:   make(chan error, 1),
		done:     make(chan bool),
	}, nil
}
示例4: clusterSummary
// clusterSummary gathers per-topic statistics for one cluster: the number of
// writable partitions, the flat backlog (newest-oldest across partitions) and
// the cumulative newest offset. Returns nil when the cluster is unreachable.
func (this *Topics) clusterSummary(zkcluster *zk.ZkCluster) []topicSummary {
	kfk, err := sarama.NewClient(zkcluster.BrokerList(), saramaConfig())
	if err != nil {
		this.Ui.Error(err.Error())
		return nil
	}
	defer kfk.Close()

	summaries := make([]topicSummary, 0, 10)
	topics, _ := kfk.Topics()
	for _, topic := range topics {
		var flat, cum int64
		partitions, _ := kfk.WritablePartitions(topic)
		for _, pid := range partitions {
			newest, _ := kfk.GetOffset(topic, pid, sarama.OffsetNewest)
			oldest, _ := kfk.GetOffset(topic, pid, sarama.OffsetOldest)
			flat += newest - oldest
			cum += newest
		}
		summaries = append(summaries, topicSummary{zkcluster.ZkZone().Name(), zkcluster.Name(), topic, len(partitions), flat, cum})
	}
	return summaries
}
示例5: NewKafkaDeliver
// NewKafkaDeliver wires up a legacy sarama client and producer for the given
// broker list and returns a KafkaDeliver configured with 8 delivery
// goroutines and matching shutdown channels.
func NewKafkaDeliver(store *Store, clientId string, brokerList []string) (*KafkaDeliver, error) {
	log.Println("go=kafka at=new-kafka-deliver")
	clientConfig := sarama.NewClientConfig()
	producerConfig := sarama.NewProducerConfig()
	client, err := sarama.NewClient(clientId, brokerList, clientConfig)
	if err != nil {
		return nil, err
	}
	log.Println("go=kafka at=created-client")
	producer, err := sarama.NewProducer(client, producerConfig)
	if err != nil {
		// Fix: close the client on this error path instead of leaking its
		// broker connections.
		client.Close()
		return nil, err
	}
	log.Println("go=kafka at=created-producer")
	return &KafkaDeliver{
		clientId:          clientId,
		brokerList:        brokerList,
		store:             store,
		producer:          producer,
		producerConfig:    producerConfig,
		client:            client,
		clientConfig:      clientConfig,
		deliverGoroutines: 8,
		shutdownDeliver:   make(chan bool, 8),
		shutdown:          make(chan bool, 8),
	}, nil
}
示例6: CreateKafkaTopic
// CreateKafkaTopic connects to the "kafka:9092" broker and bundles a client,
// async producer (plus its input channel), consumer, and a partition consumer
// reading the "http-request" topic from partition 0, offset 0. Any failure
// panics.
func CreateKafkaTopic() *KafkaTopic {
	client, err := sarama.NewClient([]string{"kafka:9092"}, sarama.NewConfig())
	if err != nil {
		panic(err)
	}
	fmt.Printf("Kafka Client connected: %v\n", client)

	topic := "http-request"

	producer, err := sarama.NewAsyncProducerFromClient(client)
	if err != nil {
		panic(err)
	}
	fmt.Printf("Kafka Producer connected: %v\n", producer)
	producable := producer.Input()

	consumer, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		panic(err)
	}
	fmt.Printf("Kafka Consumer connected: %v\n", consumer)

	consumable, err := consumer.ConsumePartition(topic, 0, 0)
	if err != nil {
		panic(err)
	}

	return &KafkaTopic{client, topic, producer, producable, consumer, consumable}
}
示例7: diagnose
// diagnose pings every registered broker of every sorted cluster. Since Kafka
// has no ping RPC, a Topics() metadata round-trip stands in for one; failures
// are logged in red, successes in green (suppressed in problematic-only mode).
func (this *Ping) diagnose() {
	this.zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		for _, broker := range zkcluster.RegisteredInfo().Roster {
			log.Debug("ping %s", broker.Addr())

			kfk, err := sarama.NewClient([]string{broker.Addr()}, sarama.NewConfig())
			if err != nil {
				log.Error("%25s %30s %s", broker.Addr(), broker.NamedAddr(), color.Red(err.Error()))
				continue
			}

			// kafka didn't provide ping, so use Topics() as ping
			if _, err = kfk.Topics(); err != nil {
				log.Error("%25s %30s %s", broker.Addr(), broker.NamedAddr(), color.Red(err.Error()))
			} else if !this.problematicMode {
				log.Info("%25s %30s %s", broker.Addr(), broker.NamedAddr(), color.Green("ok"))
			}
			kfk.Close()
		}
	})
}
示例8: newKafkaClient
// newKafkaClient builds a sarama client tuned for bulk indexing: 100 MiB
// request/response limits, proc*2 in-flight requests per broker, acks from
// all replicas, random partitioning, and a 10s metadata refresh.
// hostname is currently unused; it was intended for the commented-out
// locality-aware partitioner below.
func newKafkaClient(proc int, brokerList []string, hostname string) (sarama.Client, error) {
	// Package-level limits — note these mutate global sarama state.
	sarama.MaxRequestSize = 100 * 1024 * 1024
	sarama.MaxResponseSize = 100 * 1024 * 1024

	cfg := sarama.NewConfig()
	cfg.Net.MaxOpenRequests = proc * 2
	cfg.Producer.MaxMessageBytes = int(sarama.MaxRequestSize)
	cfg.Producer.RequiredAcks = sarama.WaitForAll
	cfg.Producer.Partitioner = sarama.NewRandomPartitioner
	cfg.Metadata.RefreshFrequency = 10 * time.Second
	cfg.ClientID = "indexer"
	// cfg.Producer.Compression = sarama.CompressionGZIP
	// cfg.Producer.Flush.MaxMessages = 10000

	client, err := sarama.NewClient(brokerList, cfg)
	if err != nil {
		return nil, err
	}
	// A host-local partitioner could be installed here:
	//   partitionerCreator := func(topic string) sarama.Partitioner {
	//       return newLocalAwarePartitioner(client, topic, hostname)
	//   }
	//   cfg.Producer.Partitioner = partitionerCreator
	return client, nil
}
示例9: NewKafka
// NewKafka builds a Kafka benchmark peer: one shared client, an async
// producer, and a partition consumer tailing new messages on the "test"
// topic. Errors from sarama are intentionally discarded, as in the original
// example.
func NewKafka(numberOfMessages int, testLatency bool) *Kafka {
	brokers := []string{"localhost:9092"}
	cfg := sarama.NewConfig()
	topic := "test"

	client, _ := sarama.NewClient(brokers, cfg)
	pub, _ := sarama.NewAsyncProducer(brokers, cfg)
	consumer, _ := sarama.NewConsumerFromClient(client)
	sub, _ := consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)

	var handler benchmark.MessageHandler
	if testLatency {
		handler = &benchmark.LatencyMessageHandler{
			NumberOfMessages: numberOfMessages,
			Latencies:        []float32{},
		}
	} else {
		handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages}
	}

	return &Kafka{
		handler: handler,
		client:  client,
		pub:     pub,
		sub:     sub,
		topic:   topic,
	}
}
示例10: TestSendData
// TestSendData produces one message via producer.SendData and compares the
// partition's oldest offset before and after.
// NOTE(review): several convey descriptions disagree with the assertions they
// wrap ("should not be equal" vs convey.ShouldEqual) — confirm the intended
// semantics before relying on this test.
func TestSendData(t *testing.T) {
kafkaClient, err := sarama.NewClient(brokerList, config)
if err != nil {
panic(err)
}
defer kafkaClient.Close()
partitionID, err := kafkaClient.Partitions(topicsInit[0])
convey.Convey("err should be nil", t, func() {
convey.So(err, convey.ShouldEqual, nil)
})
convey.Convey("partitionID should not be nil ", t, func() {
convey.So(partitionID, convey.ShouldNotEqual, nil)
})
// NOTE(review): offsets are queried on the literal topic "test" while the
// message is sent to topicsInit[0] — presumably these are the same topic;
// verify against the test fixtures.
offset, err := kafkaClient.GetOffset("test", partitionID[0], sarama.OffsetOldest)
convey.Convey("err should be nil", t, func() {
convey.So(err, convey.ShouldEqual, nil)
})
producer.NewProducer(brokerList, topicsInit, config)
producer.SendData(topicsInit[0], "init message")
offset2, err := kafkaClient.GetOffset("test", partitionID[0], sarama.OffsetOldest)
convey.Convey("err should be nil", t, func() {
convey.So(err, convey.ShouldEqual, nil)
})
// When the partition was empty the oldest offset stays 0; otherwise the
// test expects it to have advanced by one (e.g. due to retention).
if offset == 0 {
convey.Convey("offset2 should not be equal to offset ", t, func() {
convey.So(offset2, convey.ShouldEqual, offset)
})
} else {
convey.Convey("offset2 should not be equal to offset + 1 ", t, func() {
convey.So(offset2, convey.ShouldEqual, offset+1)
})
}
}
示例11: Produce
// Produce streams byte payloads from Data to the given Kafka Topic with
// round-robin partitioning until a value arrives on Quit. Producer errors are
// logged; connection/setup failures panic.
func Produce(Quit chan bool, Host []string, Topic string, Data chan []byte) {
	client, err := sarama.NewClient("crontab_client", Host, sarama.NewClientConfig())
	if err != nil {
		panic(err)
	}
	log.Println("kafka producer connected")
	defer client.Close()

	cfg := sarama.NewProducerConfig()
	cfg.Partitioner = sarama.NewRoundRobinPartitioner
	producer, err := sarama.NewProducer(client, cfg)
	if err != nil {
		panic(err)
	}
	defer producer.Close()
	log.Println("kafka producer ready")

	for {
		select {
		case pack := <-Data:
			producer.Input() <- &sarama.MessageToSend{Topic: Topic, Key: nil, Value: sarama.ByteEncoder(pack)}
		case err := <-producer.Errors():
			log.Println(err)
		case <-Quit:
			// Fix: the original used `break`, which only exits the select —
			// not the for loop — so Quit never actually stopped the producer.
			// Returning ends the loop and runs the deferred Close calls.
			return
		}
	}
}
示例12: saramaClient
// saramaClient connects to kafkaPeers using sarama's default configuration
// (nil config) and panics if the cluster is unreachable.
func saramaClient() sarama.Client {
	c, err := sarama.NewClient(kafkaPeers, nil)
	if err != nil {
		panic(err)
	}
	return c
}
示例13: NewClient
// NewClient returns a Kafka client.
//
// The ClientID is the local hostname (empty if lookup fails), snappy
// compression is enabled, and producer successes are reported. Connecting is
// retried — sleeping outOfBrokersBackoff between attempts — only while the
// failure is sarama.ErrOutOfBrokers; any other outcome ends the loop at once.
func NewClient(addresses []string) (sarama.Client, error) {
	config := sarama.NewConfig()
	hostname, err := os.Hostname()
	if err != nil {
		hostname = ""
	}
	config.ClientID = hostname
	config.Producer.Compression = sarama.CompressionSnappy
	config.Producer.Return.Successes = true

	var client sarama.Client
	for remaining := outOfBrokersRetries + 1; remaining > 0; {
		client, err = sarama.NewClient(addresses, config)
		remaining--
		if err != sarama.ErrOutOfBrokers {
			break
		}
		glog.Errorf("Can't connect to the Kafka cluster at %s (%d retries left): %s",
			addresses, remaining, err)
		time.Sleep(outOfBrokersBackoff)
	}
	return client, err
}
示例14: tryOpenConnection
// tryOpenConnection lazily (re)creates the Kafka client and async producer on
// prod. It returns true when both are available; on any failure it tears down
// partial state (so the next call retries from scratch) and returns false.
func (prod *Kafka) tryOpenConnection() bool {
// Reconnect the client first
if prod.client == nil {
if client, err := kafka.NewClient(prod.servers, prod.config); err == nil {
prod.client = client
} else {
Log.Error.Print("Kafka client error:", err)
prod.client = nil
prod.producer = nil
return false // ### return, connection failed ###
}
}
// Make sure we have a producer up and running
if prod.producer == nil {
if producer, err := kafka.NewAsyncProducerFromClient(prod.client); err == nil {
prod.producer = producer
} else {
Log.Error.Print("Kafka producer error:", err)
prod.client.Close()
prod.client = nil
prod.producer = nil
return false // ### return, connection failed ###
}
}
// NOTE(review): presumably this signals the plugin framework that the
// connection is healthy again (fuse re-activated) — confirm against the
// core.PluginControl documentation.
prod.Control() <- core.PluginControlFuseActive
return true
}
示例15: generateKafkaData
// generateKafkaData publishes a single "Hello World" message to topic on the
// test Kafka host so integration tests have data to consume, then refreshes
// the client's metadata for that topic.
func generateKafkaData(t *testing.T, topic string) {
	config := sarama.NewConfig()
	client, err := sarama.NewClient([]string{getTestKafkaHost()}, config)
	if err != nil {
		// Fix: was t.Errorf, which lets the test continue and nil-deref the
		// client below; Fatalf aborts here.
		t.Fatalf("%s", err)
	}
	// Fix: the client was never closed (connection leak across tests).
	defer client.Close()

	producer, err := sarama.NewSyncProducerFromClient(client)
	if err != nil {
		// Fix: was t.Error; continuing would nil-deref producer.
		t.Fatal(err)
	}
	defer producer.Close()

	msg := &sarama.ProducerMessage{
		Topic: topic,
		Value: sarama.StringEncoder("Hello World"),
	}
	if _, _, err = producer.SendMessage(msg); err != nil {
		t.Errorf("FAILED to send message: %s\n", err)
	}
	client.RefreshMetadata(topic)
}