This article collects typical usage examples of the NewLimiter function from the Go package golang.org/x/time/rate. If you have been wondering what exactly NewLimiter does and how to use it, the curated examples below should help.
The 15 code examples that follow are drawn from real projects and ordered by popularity.
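Before the examples, a minimal self-contained sketch of the API they all share may help. NewLimiter(r, b) returns a token-bucket *rate.Limiter that refills at r events per second (a rate.Limit) and holds at most b tokens (the burst size); callers then gate work through Allow, Reserve, or Wait:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// 5 events per second, bursts of up to 10.
	lim := rate.NewLimiter(rate.Limit(5), 10)

	// Allow reports whether an event may happen now, without blocking.
	if lim.Allow() {
		fmt.Println("request permitted")
	}

	// Wait blocks until a token is available or the context is done.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := lim.Wait(ctx); err != nil {
		fmt.Println("rate limit wait failed:", err)
	}
}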
Example 1: NewServer
func NewServer(s *Store, c *conf.Config) (svr *Server, err error) {
	svr = &Server{
		store: s,
		conf:  c,
		rl:    rate.NewLimiter(rate.Limit(c.Limit.Read.Rate), c.Limit.Read.Brust),
		wl:    rate.NewLimiter(rate.Limit(c.Limit.Write.Rate), c.Limit.Write.Brust),
		dl:    rate.NewLimiter(rate.Limit(c.Limit.Delete.Rate), c.Limit.Delete.Brust),
	}
	if svr.statSvr, err = net.Listen("tcp", c.StatListen); err != nil {
		log.Errorf("net.Listen(%s) error(%v)", c.StatListen, err)
		return
	}
	if svr.apiSvr, err = net.Listen("tcp", c.ApiListen); err != nil {
		log.Errorf("net.Listen(%s) error(%v)", c.ApiListen, err)
		return
	}
	if svr.adminSvr, err = net.Listen("tcp", c.AdminListen); err != nil {
		log.Errorf("net.Listen(%s) error(%v)", c.AdminListen, err)
		return
	}
	go svr.startStat()
	go svr.startApi()
	go svr.startAdmin()
	if c.Pprof {
		go StartPprof(c.PprofListen)
	}
	return
}
Example 2: NewBot
// NewBot initializes a number of things for proper operation. It sets the
// appropriate flags for rlog, then creates a Nimbus config to pass to the
// internal nimbus IRC client. The client is embedded into an instance of Bot,
// which is returned with its fields initialized.
func NewBot(version string, rconf *Config) *Bot {
	rlog.SetFlags(rlog.Linfo | rlog.Lwarn | rlog.Lerror | rlog.Ldebug)
	rlog.SetLogFlags(0)
	nconf := GetNimbusConfig(rconf)
	bot := &Bot{
		/* Client */ nimbus.NewClient(rconf.Server.Host, rconf.Server.Port,
			rconf.User.Nick, *nconf),
		/* Version */ version,
		/* Modules */ make(map[string]*Module),
		/* Channels */ make(map[string]*Channel),
		/* ToJoinChs */ make(map[string]string),
		/* Parser */ parser.NewParser(rconf.Command.Prefix),
		/* Handler */ NewHandler(),
		// 3 tokens per 5 seconds. Note: the untyped constant 3/5 would be
		// integer division (0), so the ratio must be written in floats.
		/* Inlim */ rate.NewLimiter(3.0/5.0, 3),
		/* Outlim */ rate.NewLimiter(rate.Every(time.Millisecond*750), 1),
		/* Config */ rconf,
		/* ListenPort */ "0",
		/* Quit Chan */ make(chan string),
		/* Mutex */ sync.Mutex{},
	}
	return bot
}
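Example 2 mixes the package's two ways of expressing a refill rate: a bare rate.Limit (events per second) and rate.Every (the minimum interval between events). The two notations are interchangeable, as this small sketch shows:

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// rate.Limit counts events per second; rate.Every converts an
	// interval between events into the equivalent Limit.
	a := rate.NewLimiter(rate.Limit(2), 1)                    // 2 events/sec
	b := rate.NewLimiter(rate.Every(500*time.Millisecond), 1) // also 2 events/sec
	fmt.Println(a.Limit() == b.Limit())                       // true
}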
Example 3: newLimiter
func newLimiter(cfg *config.Wrapper) *limiter {
	l := &limiter{
		write: rate.NewLimiter(rate.Inf, limiterBurstSize),
		read:  rate.NewLimiter(rate.Inf, limiterBurstSize),
	}
	cfg.Subscribe(l)
	prev := config.Configuration{Options: config.OptionsConfiguration{MaxRecvKbps: -1, MaxSendKbps: -1}}
	l.CommitConfiguration(prev, cfg.RawCopy())
	return l
}
Example 4: init
func (m *Manager) init() {
	m.mu.Lock()
	if m.certCache == nil {
		m.rateLimit = rate.NewLimiter(rate.Every(1*time.Minute), 20)
		m.newHostLimit = rate.NewLimiter(rate.Every(3*time.Hour), 20)
		m.certCache = map[string]*cacheEntry{}
		m.certTokens = map[string]*tls.Certificate{}
		m.watchChan = make(chan struct{}, 1)
		m.watchChan <- struct{}{}
	}
	m.mu.Unlock()
}
Example 5: NewVaultClient
// NewVaultClient returns a Vault client from the given config. If the client
// cannot be created, an error is returned.
func NewVaultClient(c *config.VaultConfig, logger *log.Logger, purgeFn PurgeVaultAccessorFn) (*vaultClient, error) {
	if c == nil {
		return nil, fmt.Errorf("must pass valid VaultConfig")
	}
	if logger == nil {
		return nil, fmt.Errorf("must pass valid logger")
	}
	v := &vaultClient{
		config:   c,
		logger:   logger,
		limiter:  rate.NewLimiter(requestRateLimit, int(requestRateLimit)),
		revoking: make(map[*structs.VaultAccessor]time.Time),
		purgeFn:  purgeFn,
		tomb:     &tomb.Tomb{},
	}
	if v.config.Enabled {
		if err := v.buildClient(); err != nil {
			return nil, err
		}
		// Launch the required goroutines
		v.tomb.Go(wrapNilError(v.establishConnection))
		v.tomb.Go(wrapNilError(v.revokeDaemon))
		v.running = true
	}
	return v, nil
}
Example 6: NewWatchProxy
func NewWatchProxy(c *clientv3.Client) pb.WatchServer {
	wp := &watchProxy{
		cw:           c.Watcher,
		ctx:          clientv3.WithRequireLeader(c.Ctx()),
		retryLimiter: rate.NewLimiter(rate.Limit(retryPerSecond), retryPerSecond),
		leaderc:      make(chan struct{}),
	}
	wp.ranges = newWatchRanges(wp)
	go func() {
		// a new stream that never opens a watcher won't catch a lost-leader
		// event, so keep a special watch to monitor it
		rev := int64((uint64(1) << 63) - 2)
		for wp.ctx.Err() == nil {
			wch := wp.cw.Watch(wp.ctx, lostLeaderKey, clientv3.WithRev(rev))
			for range wch {
			}
			wp.mu.Lock()
			close(wp.leaderc)
			wp.leaderc = make(chan struct{})
			wp.mu.Unlock()
			wp.retryLimiter.Wait(wp.ctx)
		}
		wp.mu.Lock()
		<-wp.ctx.Done()
		wp.mu.Unlock()
		wp.wg.Wait()
		wp.ranges.stop()
	}()
	return wp
}
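The goroutine in Example 6 shows a pattern that recurs below: throttling a retry loop by calling Wait at the top of each iteration. Here is a stripped-down, runnable sketch of that pattern, independent of etcd's types; the one-attempt-per-second limit and the 3-second deadline are arbitrary choices for illustration:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// At most one attempt per second; Wait blocks between iterations.
	lim := rate.NewLimiter(rate.Limit(1), 1)
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	attempts := 0
	for ctx.Err() == nil {
		if err := lim.Wait(ctx); err != nil {
			break // context cancelled or deadline exceeded
		}
		attempts++ // a reconnect/rewatch attempt would go here
	}
	fmt.Println("attempts:", attempts)
}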
Example 7: NewLimitedSampler
// NewLimitedSampler returns a sampling policy that randomly samples a given
// fraction of requests. It also enforces a limit on the number of traces per
// second. It tries to trace every request that carries a trace header, but
// will not exceed the qps limit to do so.
func NewLimitedSampler(fraction, maxqps float64) (SamplingPolicy, error) {
	if !(fraction >= 0) {
		return nil, fmt.Errorf("invalid fraction %f", fraction)
	}
	if !(maxqps >= 0) {
		return nil, fmt.Errorf("invalid maxqps %f", maxqps)
	}
	// Set a limit on the number of accumulated "tokens", to limit bursts of
	// traced requests. Use one more than a second's worth of tokens, or 100,
	// whichever is smaller.
	// See https://godoc.org/golang.org/x/time/rate#NewLimiter.
	maxTokens := 100
	if maxqps < 99.0 {
		maxTokens = 1 + int(maxqps)
	}
	var seed int64
	if err := binary.Read(crand.Reader, binary.LittleEndian, &seed); err != nil {
		seed = time.Now().UnixNano()
	}
	s := sampler{
		fraction: fraction,
		Limiter:  rate.NewLimiter(rate.Limit(maxqps), maxTokens),
		Rand:     rand.New(rand.NewSource(seed)),
	}
	return &s, nil
}
Example 8: Test_Receiver_flushDs
func Test_Receiver_flushDs(t *testing.T) {
	// So we need to test that this calls queueblocking...
	r := &Receiver{flusherChs: make([]chan *dsFlushRequest, 1), flushLimiter: rate.NewLimiter(10, 10)}
	r.flusherChs[0] = make(chan *dsFlushRequest)
	called := 0
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			if _, ok := <-r.flusherChs[0]; !ok {
				break
			}
			called++
		}
	}()
	ds := rrd.NewDataSource(0, "", 0, 0, time.Time{}, 0)
	rra, _ := rrd.NewRoundRobinArchive(0, 0, "WMEAN", time.Second, 10, 10, 0, time.Time{})
	ds.SetRRAs([]*rrd.RoundRobinArchive{rra})
	ds.ProcessIncomingDataPoint(10, time.Unix(100, 0))
	ds.ProcessIncomingDataPoint(10, time.Unix(101, 0))
	rds := &receiverDs{DataSource: ds}
	r.SetMaxFlushRate(1)
	r.flushDs(rds, false)
	r.flushDs(rds, false)
	close(r.flusherChs[0])
	wg.Wait()
	if called != 1 {
		t.Errorf("flushDs call count not 1: %d", called)
	}
	if ds.PointCount() != 0 {
		t.Errorf("ClearRRAs was not called by flushDs")
	}
}
Example 9: Stress
func (s *stresser) Stress() error {
	// TODO: add backoff option
	conn, err := grpc.Dial(s.Endpoint, grpc.WithInsecure())
	if err != nil {
		return fmt.Errorf("%v (%s)", err, s.Endpoint)
	}
	ctx, cancel := context.WithCancel(context.Background())
	wg := &sync.WaitGroup{}
	wg.Add(s.N)
	s.mu.Lock()
	s.conn = conn
	s.cancel = cancel
	s.wg = wg
	s.rateLimiter = rate.NewLimiter(rate.Every(time.Second), s.qps)
	s.mu.Unlock()
	kvc := pb.NewKVClient(conn)
	for i := 0; i < s.N; i++ {
		go s.run(ctx, kvc)
	}
	plog.Printf("stresser %q is started", s.Endpoint)
	return nil
}
Example 10: connMonitor
// connMonitor monitors the connection and handles retries
func (c *Client) connMonitor() {
	var err error
	defer func() {
		_, err = c.retryConnection(c.ctx.Err())
		c.mu.Lock()
		c.lastConnErr = err
		close(c.newconnc)
		c.mu.Unlock()
	}()
	limiter := rate.NewLimiter(rate.Every(minConnRetryWait), 1)
	for limiter.Wait(c.ctx) == nil {
		select {
		case err = <-c.reconnc:
		case <-c.ctx.Done():
			return
		}
		conn, connErr := c.retryConnection(err)
		c.mu.Lock()
		c.lastConnErr = connErr
		c.conn = conn
		close(c.newconnc)
		c.newconnc = make(chan struct{})
		c.reconnc = make(chan error, 1)
		c.mu.Unlock()
	}
}
Example 11: LimitReached
// LimitReached returns a bool indicating if the Bucket identified by key ran out of tokens.
func (l *Limiter) LimitReached(key string) bool {
	l.Lock()
	defer l.Unlock()
	if _, found := l.tokenBuckets[key]; !found {
		l.tokenBuckets[key] = rate.NewLimiter(rate.Every(l.TTL), int(l.Max))
	}
	return !l.tokenBuckets[key].AllowN(time.Now(), 1)
}
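Example 11's lazily-populated map of per-key limiters is a widely useful pattern (per user, per IP, per device). Below is a self-contained sketch of the same idea; keyedLimiter and its fields are illustrative names of our own, not part of the original package:

package main

import (
	"fmt"
	"sync"
	"time"

	"golang.org/x/time/rate"
)

// keyedLimiter keeps one token bucket per key, created on first use.
type keyedLimiter struct {
	mu      sync.Mutex
	buckets map[string]*rate.Limiter
	every   time.Duration // refill interval per token
	burst   int
}

func (k *keyedLimiter) allow(key string) bool {
	k.mu.Lock()
	defer k.mu.Unlock()
	lim, ok := k.buckets[key]
	if !ok {
		lim = rate.NewLimiter(rate.Every(k.every), k.burst)
		k.buckets[key] = lim
	}
	return lim.Allow()
}

func main() {
	kl := &keyedLimiter{
		buckets: make(map[string]*rate.Limiter),
		every:   time.Second,
		burst:   2,
	}
	for i := 0; i < 4; i++ {
		fmt.Println("client-a allowed:", kl.allow("client-a")) // true, true, false, false
	}
}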
Example 12: NewProgressReader
// NewProgressReader creates a new ProgressReader.
func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader {
	return &Reader{
		in:          in,
		out:         out,
		size:        size,
		id:          id,
		action:      action,
		rateLimiter: rate.NewLimiter(rate.Every(100*time.Millisecond), 1),
	}
}
Example 13: TestRateLimiting
func TestRateLimiting(t *testing.T) {
	inputSize := 100
	input := make([]byte, 0, inputSize*2)
	for i := 0; i < inputSize; i++ {
		inputLine := []byte{byte((i % 26) + 65), newLine}
		input = append(input, inputLine...)
	}
	fmt.Printf("input: %d\n", len(input))
	bridgeCapacity := 6
	reader := bytes.NewReader(input)
	lineLimit := 3
	metReg := metrics.NewRegistry()
	lb := NewLogBridge(reader,
		ioutil.Discard,
		ioutil.Discard,
		logging.DefaultLogger,
		lineLimit,
		1024,
		metReg,
		"log_lines",
		"log_bytes",
		"dropped_lines",
		"time_spent_throttled_ms")
	// We're testing these, so we finely control their parameters
	lb.logLineRateLimit = rate.NewLimiter(rate.Limit(inputSize), inputSize)
	lb.logByteRateLimit = rate.NewLimiter(rate.Limit(1024), 1024)
	lb.LossyCopy(reader, bridgeCapacity)
	loggedLines := lb.logLinesCount.Count()
	droppedLines := lb.droppedLineCount.Count()
	if loggedLines == 0 {
		t.Errorf("Expected some logs to get through.")
	}
	if loggedLines == int64(inputSize) {
		t.Errorf("Expected some lines to get dropped")
	}
	if droppedLines == 0 {
		t.Errorf("Expected dropped lines to be non-zero")
	}
}
Example 14: main
func main() {
	st := time.Now()
	i := 0
	limiter := rate.NewLimiter(rate.Every(time.Second), 100)
	ctx, cancel := context.WithTimeout(context.TODO(), 2*time.Second)
	for limiter.Wait(ctx) == nil {
		i++
	}
	cancel()
	fmt.Println(i, "DONE. Took", time.Since(st))
	// 101 DONE. Took 1.00013873s
	// The initial burst of 100 tokens is consumed immediately, one more
	// token refills after a second, and the next would not arrive before
	// the 2s deadline, so Wait returns an error and the loop exits.
}
Example 15: warningFor
func warningFor(dev protocol.DeviceID, msg string) {
	warningLimitersMut.Lock()
	defer warningLimitersMut.Unlock()
	lim, ok := warningLimiters[dev]
	if !ok {
		lim = rate.NewLimiter(rate.Every(perDeviceWarningIntv), 1)
		warningLimiters[dev] = lim
	}
	if lim.Allow() {
		l.Warnln(msg)
	}
}