This article collects typical code examples showing how the Every function from the golang.org/x/time/rate package is used in Go. If you are wondering what rate.Every does, how to call it, or what real-world usage looks like, the hand-picked examples below should help.
The following presents 15 code examples of the Every function, ordered by popularity.
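Before turning to those examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what rate.Every does: it converts the interval between events into a rate.Limit, i.e. a number of events per second, which is then handed to rate.NewLimiter.

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// rate.Every(100*time.Millisecond) == rate.Limit(10): ten events per second.
	limiter := rate.NewLimiter(rate.Every(100*time.Millisecond), 1) // burst of 1

	for i := 0; i < 3; i++ {
		// Wait blocks until a token is available or the context is done.
		if err := limiter.Wait(context.Background()); err != nil {
			fmt.Println("wait failed:", err)
			return
		}
		fmt.Println(i, time.Now().Format("15:04:05.000"))
	}
}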
Example 1: init
func (m *Manager) init() {
	m.mu.Lock()
	if m.certCache == nil {
		m.rateLimit = rate.NewLimiter(rate.Every(1*time.Minute), 20)
		m.newHostLimit = rate.NewLimiter(rate.Every(3*time.Hour), 20)
		m.certCache = map[string]*cacheEntry{}
		m.certTokens = map[string]*tls.Certificate{}
		m.watchChan = make(chan struct{}, 1)
		m.watchChan <- struct{}{}
	}
	m.mu.Unlock()
}
Example 2: Stress
func (s *stresser) Stress() error {
	// TODO: add backoff option
	conn, err := grpc.Dial(s.Endpoint, grpc.WithInsecure())
	if err != nil {
		return fmt.Errorf("%v (%s)", err, s.Endpoint)
	}
	ctx, cancel := context.WithCancel(context.Background())
	wg := &sync.WaitGroup{}
	wg.Add(s.N)
	s.mu.Lock()
	s.conn = conn
	s.cancel = cancel
	s.wg = wg
	s.rateLimiter = rate.NewLimiter(rate.Every(time.Second), s.qps)
	s.mu.Unlock()
	kvc := pb.NewKVClient(conn)
	for i := 0; i < s.N; i++ {
		go s.run(ctx, kvc)
	}
	plog.Printf("stresser %q is started", s.Endpoint)
	return nil
}
Example 3: NewBot
// NewBot initializes a number of things for proper operation. It sets appropriate flags
// for rlog and creates a Nimbus config to pass to the internal nimbus IRC client. This
// client is embedded into an instance of Bot, whose fields are initialized, and returned.
func NewBot(version string, rconf *Config) *Bot {
	rlog.SetFlags(rlog.Linfo | rlog.Lwarn | rlog.Lerror | rlog.Ldebug)
	rlog.SetLogFlags(0)
	nconf := GetNimbusConfig(rconf)
	bot := &Bot{
		/* Client */ nimbus.NewClient(rconf.Server.Host, rconf.Server.Port,
			rconf.User.Nick, *nconf),
		/* Version */ version,
		/* Modules */ make(map[string]*Module),
		/* Channels */ make(map[string]*Channel),
		/* ToJoinChs */ make(map[string]string),
		/* Parser */ parser.NewParser(rconf.Command.Prefix),
		/* Handler */ NewHandler(),
		/* Inlim */ rate.NewLimiter(3/5, 3),
		/* Outlim */ rate.NewLimiter(rate.Every(time.Millisecond*750), 1),
		/* Config */ rconf,
		/* ListenPort */ "0",
		/* Quit Chan */ make(chan string),
		/* Mutex */ sync.Mutex{},
	}
	return bot
}
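One detail worth noting in this example: the Inlim argument 3/5 is an untyped integer constant division that evaluates to 0, and a zero rate.Limit allows no events at all. If the intent was three events per five seconds, it could instead be expressed with rate.Every. The sketch below is a suggested correction for illustration, not the project's actual code:

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Hypothetical intent of Inlim above: 3 events per 5 seconds, burst of 3.
	inlim := rate.NewLimiter(rate.Every(5*time.Second/3), 3)
	fmt.Printf("limit: %.2f events/sec, burst: %d\n", float64(inlim.Limit()), inlim.Burst())
	// limit: 0.60 events/sec, burst: 3
}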
Example 4: pullImage
func (c *containerAdapter) pullImage(ctx context.Context) error {
	rc, err := c.client.ImagePull(ctx, c.container.image(), c.container.imagePullOptions())
	if err != nil {
		return err
	}
	dec := json.NewDecoder(rc)
	dec.UseNumber()
	m := map[string]interface{}{}
	spamLimiter := rate.NewLimiter(rate.Every(1000*time.Millisecond), 1)
	lastStatus := ""
	for {
		if err := dec.Decode(&m); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		l := log.G(ctx)
		// limit pull progress logs unless the status changes
		if spamLimiter.Allow() || lastStatus != m["status"] {
			// if we have progress details, we have everything we need
			if progress, ok := m["progressDetail"].(map[string]interface{}); ok {
				// first, log the image and status
				l = l.WithFields(logrus.Fields{
					"image":  c.container.image(),
					"status": m["status"],
				})
				// then, if we have progress, log the progress
				if progress["current"] != nil && progress["total"] != nil {
					l = l.WithFields(logrus.Fields{
						"current": progress["current"],
						"total":   progress["total"],
					})
				}
			}
			l.Debug("pull in progress")
		}
		// sometimes, we get no useful information at all, and add no fields
		if status, ok := m["status"].(string); ok {
			lastStatus = status
		}
	}
	// if the final stream object contained an error, return it
	if errMsg, ok := m["error"]; ok {
		return errors.Errorf("%v", errMsg)
	}
	return nil
}
Example 5: connMonitor
// connMonitor monitors the connection and handles retries
func (c *Client) connMonitor() {
	var err error
	defer func() {
		_, err = c.retryConnection(c.ctx.Err())
		c.mu.Lock()
		c.lastConnErr = err
		close(c.newconnc)
		c.mu.Unlock()
	}()
	limiter := rate.NewLimiter(rate.Every(minConnRetryWait), 1)
	for limiter.Wait(c.ctx) == nil {
		select {
		case err = <-c.reconnc:
		case <-c.ctx.Done():
			return
		}
		conn, connErr := c.retryConnection(err)
		c.mu.Lock()
		c.lastConnErr = connErr
		c.conn = conn
		close(c.newconnc)
		c.newconnc = make(chan struct{})
		c.reconnc = make(chan error, 1)
		c.mu.Unlock()
	}
}
Example 6: LimitReached
// LimitReached returns a bool indicating if the Bucket identified by key ran out of tokens.
func (l *Limiter) LimitReached(key string) bool {
	l.Lock()
	defer l.Unlock()
	if _, found := l.tokenBuckets[key]; !found {
		l.tokenBuckets[key] = rate.NewLimiter(rate.Every(l.TTL), int(l.Max))
	}
	return !l.tokenBuckets[key].AllowN(time.Now(), 1)
}
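The Limiter above is the host project's own wrapper type and is not shown in full. The following self-contained sketch (the keyedLimiter type and its ttl and max fields are made up for illustration) reproduces the same per-key token-bucket pattern built on rate.Every:

package main

import (
	"fmt"
	"sync"
	"time"

	"golang.org/x/time/rate"
)

// keyedLimiter is a hypothetical per-key limiter, analogous to the example above.
type keyedLimiter struct {
	mu      sync.Mutex
	buckets map[string]*rate.Limiter
	ttl     time.Duration // interval between token refills
	max     int           // burst size
}

func (l *keyedLimiter) limitReached(key string) bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	lim, ok := l.buckets[key]
	if !ok {
		lim = rate.NewLimiter(rate.Every(l.ttl), l.max)
		l.buckets[key] = lim
	}
	return !lim.Allow()
}

func main() {
	l := &keyedLimiter{buckets: map[string]*rate.Limiter{}, ttl: time.Second, max: 2}
	for i := 0; i < 4; i++ {
		fmt.Println("limit reached:", l.limitReached("10.0.0.1"))
	}
	// Prints false, false, true, true: the burst of two is spent, then the bucket is empty.
}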
Example 7: NewProgressReader
// NewProgressReader creates a new ProgressReader.
func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader {
	return &Reader{
		in:          in,
		out:         out,
		size:        size,
		id:          id,
		action:      action,
		rateLimiter: rate.NewLimiter(rate.Every(100*time.Millisecond), 1),
	}
}
Example 8: main
func main() {
	st := time.Now()
	i := 0
	limiter := rate.NewLimiter(rate.Every(time.Second), 100)
	ctx, cancel := context.WithTimeout(context.TODO(), 2*time.Second)
	for limiter.Wait(ctx) == nil {
		i++
	}
	cancel()
	fmt.Println(i, "DONE. Took", time.Since(st))
	// The limiter starts with a full burst of 100 tokens, which Wait drains at once;
	// one more token arrives after about a second, and the following wait would overrun
	// the 2-second deadline, so Wait fails and the loop stops at i == 101.
	// 101 DONE. Took 1.00013873s
}
Example 9: warningFor
func warningFor(dev protocol.DeviceID, msg string) {
	warningLimitersMut.Lock()
	defer warningLimitersMut.Unlock()
	lim, ok := warningLimiters[dev]
	if !ok {
		lim = rate.NewLimiter(rate.Every(perDeviceWarningIntv), 1)
		warningLimiters[dev] = lim
	}
	if lim.Allow() {
		l.Warnln(msg)
	}
}
Example 10: Reserve
// Reserve returns how long the crawler should wait before crawling this
// URL.
func (l *Limiter) Reserve(u *url.URL) time.Duration {
	l.mu.Lock()
	defer l.mu.Unlock()
	h := u.Host
	v, ok := l.host[h]
	if !ok {
		d, burst := l.query(h)
		v = &entry{
			limiter: rate.NewLimiter(rate.Every(d), burst),
		}
		l.host[h] = v
	} else if l.updatable && v.count >= l.freq {
		d, _ := l.query(h)
		v.limiter.SetLimit(rate.Every(d))
		v.count = 0
	}
	if l.updatable {
		v.count++
	}
	return v.limiter.Reserve().Delay()
}
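On the caller side, the returned duration is typically slept before issuing the request. Below is a minimal, self-contained sketch of that Reserve/Delay pattern against a plain rate.Limiter (the crawler's Limiter wrapper above is omitted, and the 500 ms interval is an arbitrary choice for illustration):

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// One request every 500 ms, with a burst of 1.
	lim := rate.NewLimiter(rate.Every(500*time.Millisecond), 1)

	for i := 0; i < 3; i++ {
		r := lim.Reserve()
		if !r.OK() {
			// Cannot reserve: the request exceeds the limiter's burst size.
			fmt.Println("reservation failed")
			return
		}
		time.Sleep(r.Delay()) // wait until the reservation becomes valid
		fmt.Println("fetch", i, "at", time.Now().Format("15:04:05.000"))
	}
}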
Example 11: main
func main() {
	var (
		num int
		mu  sync.Mutex
		qps = 10
		wg  sync.WaitGroup
		N   = 10000
	)
	wg.Add(N)
	limiter := rate.NewLimiter(rate.Every(time.Second), qps)
	for i := 0; i < N; i++ {
		go func(i int) {
			defer wg.Done()
			for limiter.Wait(context.TODO()) == nil {
				mu.Lock()
				num++
				mu.Unlock()
			}
		}(i)
	}
	time.Sleep(time.Second)
	mu.Lock()
	fmt.Println("num:", num)
	mu.Unlock()
	fmt.Println("burst:", limiter.Burst())
	fmt.Println("blocking...")
	donec := make(chan struct{})
	go func() {
		wg.Wait()
		close(donec)
	}()
	select {
	case <-donec:
		fmt.Println("Done!")
	case <-time.After(time.Second):
		fmt.Println("Timed out!")
	}
}
Example 12: main
func main() {
	var (
		num int
		mu  sync.Mutex
		qps = 10
		wg  sync.WaitGroup
		N   = 10000
	)
	wg.Add(N)
	ctx, cancel := context.WithCancel(context.Background())
	limiter := rate.NewLimiter(rate.Every(time.Second), qps)
	for i := 0; i < N; i++ {
		go func() {
			defer wg.Done()
			for {
				if err := limiter.Wait(ctx); err == context.Canceled {
					return
				}
				mu.Lock()
				num++
				mu.Unlock()
			}
		}()
	}
	time.Sleep(time.Second)
	mu.Lock()
	fmt.Println("num:", num)
	mu.Unlock()
	fmt.Println("burst:", limiter.Burst())
	fmt.Println("canceling...")
	cancel()
	wg.Wait()
	fmt.Println("Done!")
}
Example 13: Start
func (s *stresser) Start() {
	ctx, cancel := context.WithCancel(context.Background())
	wg := &sync.WaitGroup{}
	wg.Add(s.N)
	s.mu.Lock()
	s.wg = wg
	s.rateLimiter = rate.NewLimiter(rate.Every(time.Second), s.qps)
	// s.rateLimiter = rate.NewLimiter(rate.Limit(s.qps), s.qps)
	s.cancel = cancel
	s.canceled = false
	s.mu.Unlock()
	for i := 0; i < s.N; i++ {
		go s.run(ctx)
	}
	<-ctx.Done()
	fmt.Println("Start finished with", ctx.Err())
}
Example 14: limit
func limit(addr string, cache *lru.Cache, lock sync.RWMutex, intv time.Duration, burst int) bool {
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		return false
	}
	lock.RLock()
	bkt, ok := cache.Get(host)
	lock.RUnlock()
	if ok {
		bkt := bkt.(*rate.Limiter)
		if !bkt.Allow() {
			// Rate limit
			return true
		}
	} else {
		lock.Lock()
		cache.Add(host, rate.NewLimiter(rate.Every(intv), burst))
		lock.Unlock()
	}
	return false
}
Example 15: Stress
func (s *stresser) Stress() error {
	// TODO: add backoff option
	conn, err := grpc.Dial(s.Endpoint, grpc.WithInsecure())
	if err != nil {
		return fmt.Errorf("%v (%s)", err, s.Endpoint)
	}
	ctx, cancel := context.WithCancel(context.Background())
	wg := &sync.WaitGroup{}
	wg.Add(s.N)
	s.mu.Lock()
	s.conn = conn
	s.cancel = cancel
	s.wg = wg
	s.rateLimiter = rate.NewLimiter(rate.Every(time.Second), s.qps)
	s.mu.Unlock()
	kvc := pb.NewKVClient(conn)
	var stressEntries = []stressEntry{
		{weight: 0.7, f: newStressPut(kvc, s.KeySuffixRange, s.KeySize)},
		{weight: 0.07, f: newStressRange(kvc, s.KeySuffixRange)},
		{weight: 0.07, f: newStressRangePrefix(kvc, s.KeySuffixRange)},
		{weight: 0.07, f: newStressDelete(kvc, s.KeySuffixRange)},
		{weight: 0.07, f: newStressDeletePrefix(kvc, s.KeySuffixRange)},
	}
	s.stressTable = createStressTable(stressEntries)
	for i := 0; i < s.N; i++ {
		go s.run(ctx, kvc)
	}
	plog.Printf("stresser %q is started", s.Endpoint)
	return nil
}