This article collects typical usage examples of the NewBucketWithRate function from the Go package github.com/juju/ratelimit. If you are wondering what exactly NewBucketWithRate does and how to use it, the curated examples below may help. A total of 15 NewBucketWithRate code examples are shown, ordered by popularity.
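Before the individual examples, here is a minimal, self-contained sketch of the basic call pattern. The signature is NewBucketWithRate(rate float64, capacity int64) *Bucket: the bucket starts out full, refills at rate tokens per second, and holds at most capacity tokens. The rate of 5 tokens/s and capacity of 10 below are arbitrary values chosen only for illustration.

package main

import (
    "fmt"
    "time"

    "github.com/juju/ratelimit"
)

func main() {
    // A token bucket that refills at 5 tokens per second and holds at
    // most 10 tokens. It starts full, so the first 10 takes succeed
    // immediately (the "burst"); after that, callers are paced.
    bucket := ratelimit.NewBucketWithRate(5.0, 10)

    for i := 0; i < 15; i++ {
        start := time.Now()
        bucket.Wait(1) // block until one token is available
        fmt.Printf("request %2d waited %v\n", i, time.Since(start))
    }
}

Wait blocks until the requested number of tokens is available; Take and TakeAvailable are the non-blocking variants.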
Example 1: NewSlowListener

// NewSlowListener creates a SlowListener with specified read and write rates.
func NewSlowListener(listener net.Listener, readrate float64, writerate float64) net.Listener {
    return &SlowListener{
        listener:    listener,
        readbucket:  ratelimit.NewBucketWithRate(readrate, capacity),
        writebucket: ratelimit.NewBucketWithRate(writerate, capacity),
    }
}
Example 2: NewSlowListener

// NewSlowListener creates a SlowListener with specified read and write rates.
// Both the readrate and the writerate are specified in bytes per second. A
// value of 0 disables throttling.
func NewSlowListener(listener net.Listener, readrate uint, writerate uint) net.Listener {
    if readrate == 0 {
        readrate = MaxRate
    }
    if writerate == 0 {
        writerate = MaxRate
    }
    return &SlowListener{
        listener:    listener,
        readbucket:  ratelimit.NewBucketWithRate(float64(readrate), capacity),
        writebucket: ratelimit.NewBucketWithRate(float64(writerate), capacity),
    }
}
Example 3: NewService

func NewService(cfg *config.Wrapper, myID protocol.DeviceID, mdl Model, tlsCfg *tls.Config, discoverer discover.Finder,
    bepProtocolName string, tlsDefaultCommonName string, lans []*net.IPNet) *Service {
    service := &Service{
        Supervisor:           suture.NewSimple("connections.Service"),
        cfg:                  cfg,
        myID:                 myID,
        model:                mdl,
        tlsCfg:               tlsCfg,
        discoverer:           discoverer,
        conns:                make(chan IntermediateConnection),
        bepProtocolName:      bepProtocolName,
        tlsDefaultCommonName: tlsDefaultCommonName,
        lans:                 lans,
        natService:           nat.NewService(myID, cfg),
        listenersMut:         sync.NewRWMutex(),
        listeners:            make(map[string]genericListener),
        listenerTokens:       make(map[string]suture.ServiceToken),
        curConMut:            sync.NewMutex(),
        currentConnection:    make(map[protocol.DeviceID]Connection),
    }
    cfg.Subscribe(service)

    // The rate variables are in KiB/s in the UI (despite the camel casing
    // of the name). We multiply by 1024 here to get B/s.
    options := service.cfg.Options()
    if options.MaxSendKbps > 0 {
        service.writeRateLimit = ratelimit.NewBucketWithRate(float64(1024*options.MaxSendKbps), int64(5*1024*options.MaxSendKbps))
    }
    if options.MaxRecvKbps > 0 {
        service.readRateLimit = ratelimit.NewBucketWithRate(float64(1024*options.MaxRecvKbps), int64(5*1024*options.MaxRecvKbps))
    }

    // There are several moving parts here; one routine per listening address
    // (handled in configuration changing) to handle incoming connections, one
    // routine to periodically attempt outgoing connections, and one routine
    // for the common handling regardless of whether the connection was
    // incoming or outgoing.
    service.Add(serviceFunc(service.connect))
    service.Add(serviceFunc(service.handle))

    raw := cfg.Raw()
    // Actually starts the listeners and NAT service
    service.CommitConfiguration(raw, raw)

    return service
}
Example 4: DoParallelContainerStopBenchmark

// DoParallelContainerStopBenchmark starts routineNumber goroutines that stop
// containers, and returns the latencies of all the stop calls in nanoseconds.
// There is a global rate limit on stop calls per second.
func DoParallelContainerStopBenchmark(client *docker.Client, qps float64, routineNumber int) []int {
    wg := &sync.WaitGroup{}
    ids := GetContainerIDs(client)
    idTable := make([][]string, routineNumber)
    for i := 0; i < len(ids); i++ {
        idTable[i%routineNumber] = append(idTable[i%routineNumber], ids[i])
    }
    wg.Add(routineNumber)
    ratelimit := ratelimit.NewBucketWithRate(qps, int64(routineNumber))
    latenciesTable := make([][]int, routineNumber)
    for i := 0; i < routineNumber; i++ {
        go func(index int) {
            latencies := []int{}
            for _, id := range idTable[index] {
                ratelimit.Wait(1)
                start := time.Now()
                StopContainers(client, []string{id})
                RemoveContainers(client, []string{id})
                latencies = append(latencies, int(time.Since(start).Nanoseconds()))
            }
            latenciesTable[index] = latencies
            wg.Done()
        }(i)
    }
    wg.Wait()
    allLatencies := []int{}
    for _, latencies := range latenciesTable {
        allLatencies = append(allLatencies, latencies...)
    }
    return allLatencies
}
Example 5: init

func init() {
    if len(os.Getenv("AWS_REGION")) == 0 {
        logrus.Info("AWS_REGION is not set, skipping init of Route53 provider")
        return
    }
    if len(os.Getenv("AWS_ACCESS_KEY")) == 0 {
        logrus.Info("AWS_ACCESS_KEY is not set, skipping init of Route53 provider")
        return
    }
    if len(os.Getenv("AWS_SECRET_KEY")) == 0 {
        logrus.Info("AWS_SECRET_KEY is not set, skipping init of Route53 provider")
        return
    }

    route53Handler := &Route53Handler{}
    if err := RegisterProvider("route53", route53Handler); err != nil {
        logrus.Fatal("Could not register route53 provider")
    }

    if err := setRegion(); err != nil {
        logrus.Fatalf("Failed to set region: %v", err)
    }

    if err := setHostedZone(); err != nil {
        logrus.Fatalf("Failed to set hosted zone for root domain %s: %v", dns.RootDomainName, err)
    }

    // Throttle Route53 API calls to 5 req/s
    limiter = ratelimit.NewBucketWithRate(5.0, 1)

    logrus.Infof("Configured %s with hosted zone \"%s\" in region \"%s\" ", route53Handler.GetName(), dns.RootDomainName, region.Name)
}
Example 6: Init

func (r *Route53Provider) Init(rootDomainName string) error {
    var region, accessKey, secretKey string
    if region = os.Getenv("AWS_REGION"); len(region) == 0 {
        return fmt.Errorf("AWS_REGION is not set")
    }
    if accessKey = os.Getenv("AWS_ACCESS_KEY"); len(accessKey) == 0 {
        return fmt.Errorf("AWS_ACCESS_KEY is not set")
    }
    if secretKey = os.Getenv("AWS_SECRET_KEY"); len(secretKey) == 0 {
        return fmt.Errorf("AWS_SECRET_KEY is not set")
    }

    // Comply with the API's 5 req/s rate limit. If there are other
    // clients using the same account, the AWS SDK will throttle the
    // requests automatically if the global rate limit is exhausted.
    r.limiter = ratelimit.NewBucketWithRate(5.0, 1)

    creds := credentials.NewStaticCredentials(accessKey, secretKey, "")
    config := aws.NewConfig().WithMaxRetries(route53MaxRetries).
        WithCredentials(creds).
        WithRegion(region)
    r.client = awsRoute53.New(session.New(config))

    if err := r.setHostedZone(rootDomainName); err != nil {
        return err
    }

    logrus.Infof("Configured %s with hosted zone %s in region %s",
        r.GetName(), rootDomainName, region)

    return nil
}
Example 7: TestReader

func TestReader(t *testing.T) {
    sizes := []int64{0, 1, capacity, blockSize, 4096, 99, 100}
    for _, size := range sizes {
        src := make([]byte, size)
        _, err := rand.Read(src)
        if err != nil {
            t.Errorf("Could not read random data")
        }
        sr := slowReader{
            bytes.NewBuffer(src),
            ratelimit.NewBucketWithRate(1024*1024, capacity),
        }
        dst := make([]byte, size)
        len, err := sr.Read(dst)
        if err != nil {
            t.Errorf("Read error: %s", err)
        }
        if int64(len) != size {
            t.Errorf("Expected %d bytes, got %d", size, len)
        }
        if bytes.Equal(dst, src) != true {
            t.Fail()
        }
    }
}
Example 8: NewTokenBucketRateLimiter

// NewTokenBucketRateLimiter creates a rate limiter which implements a token bucket approach.
// The rate limiter allows bursts of up to 'burst' to exceed the QPS, while still maintaining a
// smoothed qps rate of 'qps'.
// The bucket is initially filled with 'burst' tokens, and refills at a rate of 'qps'.
// The maximum number of tokens in the bucket is capped at 'burst'.
func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter {
    limiter := ratelimit.NewBucketWithRate(float64(qps), int64(burst))
    return &tokenBucketRateLimiter{
        limiter: limiter,
        qps:     qps,
    }
}
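A side note on the doc comment above: in github.com/juju/ratelimit a freshly created bucket is indeed full, so up to 'burst' tokens can be taken immediately before the qps rate starts pacing callers. The sketch below illustrates this; it reuses the 10 qps / 100 burst values from the workqueue examples and is an illustration, not code from the original project.

package main

import (
    "fmt"

    "github.com/juju/ratelimit"
)

func main() {
    // qps=10, burst=100, mirroring the workqueue defaults shown above.
    bucket := ratelimit.NewBucketWithRate(10.0, 100)

    fmt.Println(bucket.Available())        // 100: a new bucket starts full
    fmt.Println(bucket.TakeAvailable(150)) // 100: only the burst can be taken without waiting
}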
Example 9: TestWriter

func TestWriter(t *testing.T) {
    sizes := []int64{0, 1, capacity, blockSize, 4096, 99, 100}
    for _, size := range sizes {
        b := &bytes.Buffer{}
        sw := slowWriter{b, ratelimit.NewBucketWithRate(1024*1024, capacity)}
        data := make([]byte, size)
        _, err := rand.Read(data)
        if err != nil {
            t.Errorf("Could not read random data")
        }
        len, err := sw.Write(data)
        if err != nil {
            t.Errorf("Write error: %s", err)
        }
        if int64(len) != size {
            t.Errorf("Expected to write %d bytes, wrote %d", size, len)
        }
        if bytes.Equal(data, b.Bytes()) != true {
            t.Fail()
        }
    }
}
Example 10: DefaultControllerRateLimiter

// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has
// both overall and per-item rate limiting. The overall limiter is a token bucket and the per-item limiter is exponential.
func DefaultControllerRateLimiter() RateLimiter {
    return NewMaxOfRateLimiter(
        DefaultItemBasedRateLimiter(),
        // 10 qps, 100 bucket size. This is only for retry speed and it's only the overall factor (not per item).
        &BucketRateLimiter{Bucket: ratelimit.NewBucketWithRate(float64(10), int64(100))},
    )
}
Example 11: Init

func (d *DNSimpleProvider) Init(rootDomainName string) error {
    var email, apiToken string
    if email = os.Getenv("DNSIMPLE_EMAIL"); len(email) == 0 {
        return fmt.Errorf("DNSIMPLE_EMAIL is not set")
    }
    if apiToken = os.Getenv("DNSIMPLE_TOKEN"); len(apiToken) == 0 {
        return fmt.Errorf("DNSIMPLE_TOKEN is not set")
    }

    d.root = utils.UnFqdn(rootDomainName)
    d.client = api.NewClient(apiToken, email)
    d.limiter = ratelimit.NewBucketWithRate(1.5, 5)

    domains, _, err := d.client.Domains.List()
    if err != nil {
        return fmt.Errorf("Failed to list zones: %v", err)
    }

    found := false
    for _, domain := range domains {
        if domain.Name == d.root {
            found = true
            break
        }
    }
    if !found {
        return fmt.Errorf("Zone for '%s' not found", d.root)
    }

    logrus.Infof("Configured %s with zone '%s'", d.GetName(), d.root)
    return nil
}
Example 12: TestTokenBucketLimiter

func TestTokenBucketLimiter(t *testing.T) {
    e := func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }
    for _, n := range []int{1, 2, 100} {
        tb := jujuratelimit.NewBucketWithRate(float64(n), int64(n))
        testLimiter(t, ratelimit.NewTokenBucketLimiter(tb)(e), n)
    }
}
Example 13: DefaultControllerRateLimiter

// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has
// both overall and per-item rate limiting. The overall limiter is a token bucket and the per-item limiter is exponential.
func DefaultControllerRateLimiter() RateLimiter {
    return NewMaxOfRateLimiter(
        NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
        // 10 qps, 100 bucket size. This is only for retry speed and it's only the overall factor (not per item).
        &BucketRateLimiter{Bucket: ratelimit.NewBucketWithRate(float64(10), int64(100))},
    )
}
Example 14: DoParallelContainerStartBenchmark

// DoParallelContainerStartBenchmark starts routineNumber goroutines that start
// containers, and returns the latencies of all the start calls in nanoseconds.
// There is a global rate limit on start calls per second.
func DoParallelContainerStartBenchmark(client *docker.Client, qps float64, testPeriod time.Duration, routineNumber int) []int {
    wg := &sync.WaitGroup{}
    wg.Add(routineNumber)
    ratelimit := ratelimit.NewBucketWithRate(qps, int64(routineNumber))
    latenciesTable := make([][]int, routineNumber)
    for i := 0; i < routineNumber; i++ {
        go func(index int) {
            startTime := time.Now()
            latencies := []int{}
            for {
                ratelimit.Wait(1)
                start := time.Now()
                ids := CreateContainers(client, 1)
                StartContainers(client, ids)
                latencies = append(latencies, int(time.Since(start).Nanoseconds()))
                if time.Now().Sub(startTime) >= testPeriod {
                    break
                }
            }
            latenciesTable[index] = latencies
            wg.Done()
        }(i)
    }
    wg.Wait()
    allLatencies := []int{}
    for _, latencies := range latenciesTable {
        allLatencies = append(allLatencies, latencies...)
    }
    return allLatencies
}
Example 15: NewRateLimitReader

func NewRateLimitReader(rc io.ReadCloser, rate float64, capacity int64) io.ReadCloser {
    var rlr rateLimitReader
    rlr.rc = rc
    rlr.rlr = ratelimit.Reader(rc, ratelimit.NewBucketWithRate(rate, capacity))
    return &rlr
}
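Example 15 relies on ratelimit.Reader, which wraps an io.Reader so that each Read consumes tokens from the bucket. The sketch below applies the same idea directly to an in-memory reader; the 1024 B/s rate, 1024-token capacity, and 2048-byte payload are arbitrary illustration values, not part of the original project.

package main

import (
    "fmt"
    "io"
    "strings"
    "time"

    "github.com/juju/ratelimit"
)

func main() {
    src := strings.NewReader(strings.Repeat("x", 2048))

    // Allow roughly 1 KiB/s with a 1 KiB burst; the bucket starts full,
    // so the first kilobyte passes immediately and the rest is paced.
    bucket := ratelimit.NewBucketWithRate(1024, 1024)
    throttled := ratelimit.Reader(src, bucket)

    start := time.Now()
    n, err := io.Copy(io.Discard, throttled)
    fmt.Printf("copied %d bytes in %v (err=%v)\n", n, time.Since(start), err)
}

With these values the copy should take roughly one second: the initial 1024 bytes consume the burst, and the remaining 1024 bytes are paced at about 1 KiB/s.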