本文整理汇总了Golang中testing.B.SetParallelism方法的典型用法代码示例。如果您正苦于以下问题:Golang B.SetParallelism方法的具体用法?Golang B.SetParallelism怎么用?Golang B.SetParallelism使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类testing.B
的用法示例。
在下文中一共展示了B.SetParallelism方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: BenchmarkHbasePut
// BenchmarkHbasePut measures parallel Put throughput against an HBase
// thrift endpoint. Keys are pre-generated outside the timed region so the
// benchmark measures only the Put path.
//
// NOTE(review): the pre-filled channel holds exactly 1,000,000 keys; if
// b.N exceeds that, receivers block — same limitation as the original.
func BenchmarkHbasePut(b *testing.B) {
	h := NewHBaseClient()
	if err := Init("172.16.13.90:9090", 5*time.Second, 200, 200); err != nil {
		// Fatalf == Errorf + FailNow, replacing the original two-call form.
		b.Fatalf("Init failed")
	}
	// Pre-generate keys so timestamp creation is excluded from the timing.
	ch := make(chan int64, 1000000)
	for j := 0; j < 1000000; j++ {
		ch <- time.Now().UnixNano()
	}
	b.ResetTimer()
	b.SetParallelism(8)
	b.RunParallel(func(pb *testing.PB) {
		// Per-goroutine needle: the original shared one *meta.Needle (plus
		// the err and t variables) across all workers — a data race.
		m := &meta.Needle{}
		for pb.Next() {
			m.Key = <-ch
			if err := h.Put(m); err != nil {
				// Best-effort: errors are ignored, as in the original.
				continue
			}
		}
	})
}
示例2: benchPutItemParallel
// benchPutItemParallel benchmarks DynamoDB PutItem with parallelism p and a
// fixed iteration count c (it overwrites b.N deliberately so every run does
// the same amount of work).
func benchPutItemParallel(p, c int, b *testing.B) {
	svc := dynamodb.New(&aws.Config{
		DisableSSL: aws.Bool(true),
	})
	av, err := dynamodbattribute.ConvertToMap(dbItem{Key: "MyKey", Data: "MyData"})
	if err != nil {
		b.Fatal("expect no ConvertToMap errors", err)
	}
	params := &dynamodb.PutItemInput{
		Item:      av,
		TableName: aws.String(testTableName),
	}
	b.N = c
	b.ResetTimer()
	b.SetParallelism(p)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			// Per-iteration err: the original assigned the outer err from
			// every worker goroutine concurrently — a data race.
			if _, err := svc.PutItem(params); err != nil {
				b.Error("expect no request errors", err)
			}
		}
	})
}
示例3: benchmarkStrconvComparison
// benchmarkStrconvComparison measures the cost of comparing post IDs stored
// as strings: each step parses both IDs with strconv.Atoi under a mutex and
// refreshes the stored high-water mark. i == 0 runs the plain serial b.N
// loop; any other value runs RunParallel with parallelism i.
func benchmarkStrconvComparison(i int, b *testing.B) {
	var idLock sync.Mutex
	highestID := "122222222"
	// step is the single unit of work shared by both the serial and the
	// parallel variants.
	step := func() {
		candidate, _ := strconv.Atoi("110312919")
		idLock.Lock()
		current, _ := strconv.Atoi(highestID)
		if candidate >= current {
			highestID = "110312919"
		}
		idLock.Unlock()
	}
	if i == 0 {
		for n := 0; n < b.N; n++ {
			step()
		}
		return
	}
	b.SetParallelism(i)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			step()
		}
	})
}
示例4: BenchmarkQuotaRequests
// BenchmarkQuotaRequests measures the round-trip rate of Allow calls against
// a locally running quota-service gRPC server, with 8x GOMAXPROCS workers.
func BenchmarkQuotaRequests(b *testing.B) {
	fmt.Println("Starting example client.")
	serverAddr := "127.0.0.1:10990"
	opts := []grpc.DialOption{grpc.WithInsecure()}
	conn, err := grpc.Dial(serverAddr, opts...)
	if err != nil {
		grpclog.Fatalf("fail to dial: %v", err)
	}
	defer conn.Close()
	client := pb.NewQuotaServiceClient(conn)
	req := &pb.AllowRequest{
		Namespace:       "test.namespace",
		BucketName:      "one",
		TokensRequested: 1}
	b.ResetTimer()
	b.SetParallelism(8)
	// The closure parameter is named tpb so it does not shadow the
	// generated protobuf package "pb".
	b.RunParallel(func(tpb *testing.PB) {
		for tpb.Next() {
			// Best-effort: the response and error are ignored, as in the
			// original — this measures raw request throughput only.
			client.Allow(context.TODO(), req)
		}
	})
}
示例5: benchmarkInt64Comparison
// benchmarkInt64Comparison measures the cost of comparing post IDs kept as
// int64 under a mutex (the counterpart to the strconv string variant).
// i == 0 runs the serial b.N loop; any other value runs RunParallel with
// parallelism i.
func benchmarkInt64Comparison(i int, b *testing.B) {
	var idLock sync.Mutex
	highestID := int64(122222222)
	// step is the single unit of work shared by both variants.
	step := func() {
		candidate := int64(132145174)
		idLock.Lock()
		if highestID < candidate {
			highestID = candidate
		}
		idLock.Unlock()
	}
	if i == 0 {
		for n := 0; n < b.N; n++ {
			step()
		}
		return
	}
	b.SetParallelism(i)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			step()
		}
	})
}
示例6: BenchmarkAtomicIncrement
// BenchmarkAtomicIncrement hammers a single key with 50x GOMAXPROCS
// concurrent Increment calls, then verifies the final counter equals b.N —
// i.e. that the store's increment is actually atomic under contention.
func BenchmarkAtomicIncrement(store data.Store, b *testing.B) {
	// Stores that cannot bound item lifetime are skipped entirely.
	if err := store.SetLifetime(time.Second*30, data.ScopeAll); err != nil {
		b.Skip("Set lifetime to all items is not supported")
	}
	b.ResetTimer()
	b.SetParallelism(50)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_, err := store.Increment("key001")
			if err != nil {
				b.Errorf("Could not increment value: %v", err)
			}
		}
	})
	b.StopTimer()
	// Correctness check: every one of the b.N increments must be visible.
	var total int
	if err := store.Get("key001", &total); err != nil {
		b.Errorf("Could not get stored value: %v", err)
	}
	if total != b.N {
		b.Errorf("Unexpected value: got %d instead of %d", total, b.N)
	}
}
示例7: BThreadsAtomicComplex
// BThreadsAtomicComplex benchmarks atomic find-or-insert transactions on a
// StoredMap, cycling each worker through the shared UniqKey fixture.
func BThreadsAtomicComplex(b *testing.B, sm api.StoredMap) {
	keyCount := len(UniqKey)
	// insert performs one atomic transaction: if the key already exists the
	// value is only type-asserted; otherwise the key is set and updated.
	insert := func(key string) {
		sm.Atomic(func(m api.Mapper) {
			if value, found := m.Find(key); found {
				_ = value.(string)
				return
			}
			m.SetKey(key)
			m.Update(key)
		})
	}
	b.SetParallelism(CntBenchWorks)
	b.ReportAllocs()
	b.SetBytes(2)
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		// Each worker walks the key fixture round-robin.
		next := 0
		for pb.Next() {
			insert(UniqKey[next%keyCount])
			next++
		}
	})
}
示例8: BenchmarkNetrpcByteSlice
// BenchmarkNetrpcByteSlice measures net/rpc byte-slice echo round-trips over
// an in-process TCP pipe with 250x GOMAXPROCS client goroutines.
func BenchmarkNetrpcByteSlice(b *testing.B) {
	connC, connS := getTcpPipe(b)
	defer connC.Close()
	defer connS.Close()
	s := rpc.NewServer()
	if err := s.Register(&NetrpcService{}); err != nil {
		b.Fatalf("Error when registering rpc service: %s", err)
	}
	go s.ServeConn(connS)
	c := rpc.NewClient(connC)
	defer c.Close()
	req := []byte("byte slice byte slice aaa bbb ccc foobar")
	b.SetParallelism(250)
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		var resp []byte
		for i := 0; pb.Next(); i++ {
			// Fatalf calls FailNow, which the testing package requires to
			// run on the benchmark goroutine — not RunParallel workers.
			// Errorf + return is the safe equivalent here.
			if err := c.Call("NetrpcService.ByteSlice", req, &resp); err != nil {
				b.Errorf("Unexpected error when calling NetrpcService.ByteSlice(%q): %s", req, err)
				return
			}
			if !bytes.Equal(resp, req) {
				b.Errorf("Unexpected response: %q. Expected %q", resp, req)
				return
			}
		}
	})
}
示例9: BenchmarkBatch200RandomWritesParallel10
func BenchmarkBatch200RandomWritesParallel10(b *testing.B) {
var term Term
var data []map[string]interface{}
b.SetParallelism(10)
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
for is := 0; is < 200; is++ {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
cid := map[string]interface{}{
"customer_id": strconv.FormatInt(r.Int63(), 10),
}
data = append(data, cid)
}
// Insert the new item into the database
term = DB("benchmarks").Table("benchmarks").Insert(data)
// Insert the new item into the database
_, err := term.RunWrite(session, RunOpts{
MinBatchRows: 200,
MaxBatchRows: 200,
})
if err != nil {
b.Errorf("insert failed [%s] ", err)
}
}
})
}
示例10: BenchmarkSequentialWritesParallel10
// BenchmarkSequentialWritesParallel10 inserts rows with a strictly
// increasing customer_id from 10x GOMAXPROCS goroutines.
func BenchmarkSequentialWritesParallel10(b *testing.B) {
	var mu sync.Mutex
	si := 0
	// p*GOMAXPROCS
	b.SetParallelism(10)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			// Capture this goroutine's ID while still holding the lock:
			// the original read si after Unlock, a data race that could
			// insert another goroutine's (or a torn) value.
			mu.Lock()
			si++
			id := si
			mu.Unlock()
			data := map[string]interface{}{
				"customer_id": id,
			}
			// Insert the new item into the database
			_, err := Table(bTableName).Insert(data).RunWrite(bSess)
			if err != nil {
				b.Errorf("insert failed [%s] ", err)
				return
			}
		}
	})
}
示例11: Benchmark_TerrorParallelGetting
// Benchmark_TerrorParallelGetting measures concurrent GetHierarchy lookups,
// with each worker cycling through a fixed set of 12 Terror values.
func Benchmark_TerrorParallelGetting(b *testing.B) {
	errs := []Terror{
		E_IO{}, E_Network{},
		TE0{}, TE1{}, TE2{}, TE3{}, TE4{},
		TE5{}, TE6{}, TE7{}, TE8{}, TE9{},
	}
	b.SetParallelism(4)
	// The closure parameter is named pb (not b, as in the original) so it
	// does not shadow the enclosing *testing.B.
	b.RunParallel(func(pb *testing.PB) {
		idx := 0
		for pb.Next() {
			idx = (idx + 1) % 12
			// Result is discarded; only the lookup cost is measured.
			_ = GetHierarchy(reflect.TypeOf(errs[idx]))
		}
	})
}
示例12: BenchmarkMutexNoSpin
// This benchmark models a situation where spinning in the mutex should be
// non-profitable and allows to confirm that spinning does not do harm.
// To achieve this we create excess of goroutines most of which do local work.
// These goroutines yield during local work, so that switching from
// a blocked goroutine to other goroutines is profitable.
// As a matter of fact, this benchmark still triggers some spinning in the mutex.
func BenchmarkMutexNoSpin(b *testing.B) {
	var m Mutex
	var acc0, acc1 uint64
	b.SetParallelism(4)
	b.RunParallel(func(pb *testing.PB) {
		c := make(chan bool)
		var data [4 << 10]uint64
		for iter := 0; pb.Next(); iter++ {
			if iter%4 == 0 {
				// One in four iterations contends on the mutex.
				m.Lock()
				acc0 -= 100
				acc1 += 100
				m.Unlock()
			} else {
				// Local work; j deliberately distinct from iter (the
				// original shadowed the outer loop variable i here).
				for j := 0; j < len(data); j += 4 {
					data[j]++
				}
				// Elaborate way to say runtime.Gosched
				// that does not put the goroutine onto global runq.
				go func() {
					c <- true
				}()
				<-c
			}
		}
	})
}
示例13: BenchmarkRemoteClient
// BenchmarkRemoteClient measures echo round-trips through the remoting
// client with 4x GOMAXPROCS worker goroutines.
func BenchmarkRemoteClient(t *testing.B) {
	t.SetParallelism(4)
	t.RunParallel(func(pb *testing.PB) {
		// One request per pb.Next(): the original nested a full
		// `for i := 0; i < t.N; i++` loop inside pb.Next(), doing O(N²)
		// work — RunParallel already distributes the t.N iterations.
		for pb.Next() {
			p := packet.NewPacket(1, []byte("echo"))
			tmp := clientManager.FindRemoteClients([]string{"a"}, func(groupid string, c *client.RemotingClient) bool {
				return false
			})
			_, err := tmp["a"][0].WriteAndGet(*p, 500*time.Millisecond)
			clientf.WriteFlow.Incr(1)
			if nil != err {
				// Fail (unlike FailNow) is safe from worker goroutines.
				t.Fail()
				log.Printf("WAIT RESPONSE FAIL|%s\n", err)
			}
		}
	})
}
示例14: benchmarkMutex
// benchmarkMutex measures etcd-backed distributed lock acquire/release.
// slack adds 10x GOMAXPROCS contention; work adds a little CPU busywork
// per iteration to model a non-empty critical path.
func benchmarkMutex(b *testing.B, slack, work bool) {
	client := etcd.NewClient([]string{"http://127.0.0.1:4001"})
	client.Delete(key, true)
	mu := NewMutexFromClient(client, key, 0)
	if slack {
		b.SetParallelism(10)
	}
	b.RunParallel(func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			// b.Fatal calls FailNow, which must run on the benchmark
			// goroutine, not RunParallel workers; report the failure and
			// stop this worker instead.
			if err := mu.Lock(); err != nil {
				b.Error("could not acquire lock, is etcd running?", err)
				return
			}
			mu.Unlock()
			if work {
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
		}
		_ = foo
	})
}
示例15: BenchmarkManyConcurrentQueries
// BenchmarkManyConcurrentQueries stresses prepared-statement execution from
// many goroutines to surface lock contention in database/sql.
func BenchmarkManyConcurrentQueries(b *testing.B) {
	b.ReportAllocs()
	// To see lock contention in Go 1.4, 16~ cores and 128~ goroutines are required.
	const parallelism = 16
	db := newTestDB(b, "magicquery")
	defer closeDB(b, db)
	// Size the idle pool to the worker count so connections are reused.
	db.SetMaxIdleConns(runtime.GOMAXPROCS(0) * parallelism)
	stmt, err := db.Prepare("SELECT|magicquery|op|op=?,millis=?")
	if err != nil {
		b.Fatal(err)
	}
	defer stmt.Close()
	b.SetParallelism(parallelism)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			rows, qerr := stmt.Query("sleep", 1)
			if qerr != nil {
				b.Error(qerr)
				return
			}
			// Close without iterating: only query dispatch is measured.
			rows.Close()
		}
	})
}