This page collects typical usage examples of the New function from the Golang package github.com/eapache/go-resiliency/breaker. If you are unsure what breaker.New does, how to call it, or what it looks like in real code, the curated examples below may help.
Six code examples of the New function are shown, ordered by popularity.
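Before the examples, here is a minimal, self-contained sketch of the breaker API that all of them rely on. The thresholds mirror the breaker.New(3, 1, 10*time.Second) call each example makes; the doWork closure and the log messages are illustrative assumptions, not taken from the examples themselves.

package main

import (
	"errors"
	"log"
	"time"

	"github.com/eapache/go-resiliency/breaker"
)

func main() {
	// New(errorThreshold, successThreshold, timeout): the breaker opens after 3 errors
	// occur without an error-free window of at least 10s, and closes again after 1
	// success once it has half-opened.
	b := breaker.New(3, 1, 10*time.Second)

	// doWork stands in for whatever operation the breaker protects (hypothetical here).
	doWork := func() error { return errors.New("transient failure") }

	if err := b.Run(doWork); err == breaker.ErrBreakerOpen {
		// the breaker is open, so Run returned immediately without calling doWork
		log.Println("circuit open, skipping call")
	} else if err != nil {
		log.Println("call failed:", err)
	}
}

In the examples that follow, Run guards partition assignment and leader/metadata refresh in exactly this way, so repeated failures are turned into fast errors instead of repeated calls.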
Example 1: partitionDispatcher
// one per topic
// partitions messages, then dispatches them by partition
func (p *asyncProducer) partitionDispatcher(topic string, input <-chan *ProducerMessage) {
	handlers := make(map[int32]chan *ProducerMessage)
	partitioner := p.conf.Producer.Partitioner(topic)
	breaker := breaker.New(3, 1, 10*time.Second)

	for msg := range input {
		if msg.retries == 0 {
			err := breaker.Run(func() error {
				return p.assignPartition(partitioner, msg)
			})
			if err != nil {
				p.returnError(msg, err)
				continue
			}
		}

		handler := handlers[msg.Partition]
		if handler == nil {
			newHandler := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
			topic := msg.Topic         // block local because go's closure semantics suck
			partition := msg.Partition // block local because go's closure semantics suck
			go withRecover(func() { p.leaderDispatcher(topic, partition, newHandler) })
			handler = newHandler
			handlers[msg.Partition] = handler
		}

		handler <- msg
	}

	for _, handler := range handlers {
		close(handler)
	}
}
Example 2: newTopicProducer
// newTopicProducer builds the per-topic producer state, including its circuit breaker,
// and starts the dispatch goroutine that partitions and forwards incoming messages.
func (p *asyncProducer) newTopicProducer(topic string, input <-chan *ProducerMessage) *topicProducer {
	tp := &topicProducer{
		parent:      p,
		topic:       topic,
		input:       input,
		breaker:     breaker.New(3, 1, 10*time.Second),
		handlers:    make(map[int32]chan *ProducerMessage),
		partitioner: p.conf.Producer.Partitioner(topic),
	}
	go withRecover(tp.dispatch)
	return tp
}
Example 3: newTopicProducer
// Variant that creates the input channel itself and returns it,
// rather than returning the topicProducer struct.
func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
	tp := &topicProducer{
		parent:      p,
		topic:       topic,
		input:       input,
		breaker:     breaker.New(3, 1, 10*time.Second),
		handlers:    make(map[int32]chan<- *ProducerMessage),
		partitioner: p.conf.Producer.Partitioner(topic),
	}
	go withRecover(tp.dispatch)
	return input
}
Example 4: newPartitionProducer
// newPartitionProducer builds the per-partition producer state (breaker plus
// per-retry-level buffers) and starts its dispatch goroutine.
func (p *asyncProducer) newPartitionProducer(topic string, partition int32, input <-chan *ProducerMessage) *partitionProducer {
	pp := &partitionProducer{
		parent:     p,
		topic:      topic,
		partition:  partition,
		input:      input,
		breaker:    breaker.New(3, 1, 10*time.Second),
		retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
	}
	go withRecover(pp.dispatch)
	return pp
}
Example 5: leaderDispatcher
// one per partition per topic
// dispatches messages to the appropriate broker
// also responsible for maintaining message order during retries
func (p *asyncProducer) leaderDispatcher(topic string, partition int32, input <-chan *ProducerMessage) {
	var leader *Broker
	var output chan *ProducerMessage

	breaker := breaker.New(3, 1, 10*time.Second)
	doUpdate := func() (err error) {
		if err = p.client.RefreshMetadata(topic); err != nil {
			return err
		}

		if leader, err = p.client.Leader(topic, partition); err != nil {
			return err
		}

		output = p.getBrokerProducer(leader)
		return nil
	}

	// try to prefetch the leader; if this doesn't work, we'll do a proper breaker-protected refresh-and-fetch
	// on the first message
	leader, _ = p.client.Leader(topic, partition)
	if leader != nil {
		output = p.getBrokerProducer(leader)
	}

	// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
	// all other messages get buffered in retryState[msg.retries].buf to preserve ordering
	// retryState[msg.retries].expectChaser simply tracks whether we've seen a chaser message for a given level (and
	// therefore whether our buffer is complete and safe to flush)
	highWatermark := 0
	retryState := make([]struct {
		buf          []*ProducerMessage
		expectChaser bool
	}, p.conf.Producer.Retry.Max+1)

	for msg := range input {
		if msg.retries > highWatermark {
			// new, higher, retry level; send off a chaser so that we know when everything "in between" has made it
			// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
			highWatermark = msg.retries
			Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", topic, partition, highWatermark)
			retryState[msg.retries].expectChaser = true
			p.inFlight.Add(1) // we're generating a chaser message; track it so we don't shut down while it's still inflight
			output <- &ProducerMessage{Topic: topic, Partition: partition, flags: chaser, retries: msg.retries - 1}
			Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", topic, partition, leader.ID())
			p.unrefBrokerProducer(leader, output)
			output = nil
			time.Sleep(p.conf.Producer.Retry.Backoff)
		} else if highWatermark > 0 {
			// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
			if msg.retries < highWatermark {
				// in fact this message is not even the current retry level, so buffer it for now (unless it's just a chaser)
				if msg.flags&chaser == chaser {
					retryState[msg.retries].expectChaser = false
					p.inFlight.Done() // this chaser is now handled and will be garbage collected
				} else {
					retryState[msg.retries].buf = append(retryState[msg.retries].buf, msg)
				}
				continue
			} else if msg.flags&chaser == chaser {
				// this message is of the current retry level (msg.retries == highWatermark) and the chaser flag is set,
				// meaning this retry level is done and we can go down (at least) one level and flush that
				retryState[highWatermark].expectChaser = false
				Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", topic, partition, highWatermark)

				for {
					highWatermark--

					if output == nil {
						if err := breaker.Run(doUpdate); err != nil {
							p.returnErrors(retryState[highWatermark].buf, err)
							goto flushDone
						}
						Logger.Printf("producer/leader/%s/%d selected broker %d\n", topic, partition, leader.ID())
					}

					for _, msg := range retryState[highWatermark].buf {
						output <- msg
					}

				flushDone:
					retryState[highWatermark].buf = nil
					if retryState[highWatermark].expectChaser {
						Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", topic, partition, highWatermark)
						break
					} else {
						if highWatermark == 0 {
							Logger.Printf("producer/leader/%s/%d state change to [normal]\n", topic, partition)
							break
						}
					}
				}

				p.inFlight.Done() // this chaser is now handled and will be garbage collected
				continue
			}
		}
//......... remainder of this example omitted .........
Example 6: leaderDispatcher
// one per partition per topic
// dispatches messages to the appropriate broker
// also responsible for maintaining message order during retries
func (p *Producer) leaderDispatcher(topic string, partition int32, input chan *MessageToSend) {
	var leader *Broker
	var output chan *MessageToSend
	var backlog []*MessageToSend
	breaker := breaker.New(3, 1, 10*time.Second)
	doUpdate := func() (err error) {
		if err = p.client.RefreshTopicMetadata(topic); err != nil {
			return err
		}

		if leader, err = p.client.Leader(topic, partition); err != nil {
			return err
		}

		output = p.getBrokerWorker(leader)
		return nil
	}

	// try to prefetch the leader; if this doesn't work, we'll do a proper breaker-protected refresh-and-fetch
	// on the first message
	leader, _ = p.client.Leader(topic, partition)
	if leader != nil {
		output = p.getBrokerWorker(leader)
	}

	for msg := range input {
		if msg.flags&retried == 0 {
			// normal case
			if backlog != nil {
				backlog = append(backlog, msg)
				continue
			}
		} else if msg.flags&chaser == 0 {
			// retry flag set, chaser flag not set
			if backlog == nil {
				// on the very first retried message we send off a chaser so that we know when everything "in between" has made it
				// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
				Logger.Printf("producer/leader state change to [retrying] on %s/%d\n", topic, partition)
				output <- &MessageToSend{Topic: topic, partition: partition, flags: chaser}
				backlog = make([]*MessageToSend, 0)
				p.unrefBrokerWorker(leader)
				output = nil
				time.Sleep(p.config.RetryBackoff)
			}
		} else {
			// retry *and* chaser flag set, flush the backlog and return to normal processing
			Logger.Printf("producer/leader state change to [flushing] on %s/%d\n", topic, partition)
			if output == nil {
				if err := breaker.Run(doUpdate); err != nil {
					p.returnErrors(backlog, err)
					backlog = nil
					continue
				}
			}

			for _, msg := range backlog {
				output <- msg
			}
			Logger.Printf("producer/leader state change to [normal] on %s/%d\n", topic, partition)

			backlog = nil
			continue
		}

		if output == nil {
			if err := breaker.Run(doUpdate); err != nil {
				p.returnError(msg, err)
				continue
			}
		}

		output <- msg
	}

	p.unrefBrokerWorker(leader)
	p.retries <- &MessageToSend{flags: unref}
}