本文整理汇总了Golang中testing.B.RunParallel方法的典型用法代码示例。如果您正苦于以下问题:Golang B.RunParallel方法的具体用法?Golang B.RunParallel怎么用?Golang B.RunParallel使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类testing.B
的用法示例。
在下文中一共展示了B.RunParallel方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: parallelBench
// parallelBench seeds cm with random key/value pairs and then runs fn
// under b.RunParallel. Because keys are random, collisions may occur and
// the map doesn't necessarily end up with len(states) distinct entries.
func parallelBench(b *testing.B, cm Congomap, fn func(*testing.PB)) {
	// One store attempt per known state; duplicate keys simply overwrite.
	for range states {
		cm.Store(randomState(), randomState())
	}
	b.RunParallel(fn)
}
示例2: BenchmarkSprintfBoolean
// BenchmarkSprintfBoolean measures the cost of formatting a boolean with
// the %t verb under parallel load.
func BenchmarkSprintfBoolean(b *testing.B) {
	body := func(pb *testing.PB) {
		for pb.Next() {
			Sprintf("%t", true)
		}
	}
	b.RunParallel(body)
}
示例3: BenchmarkConnRoutingKey
// BenchmarkConnRoutingKey measures GetRoutingKey throughput for a bound
// query under a token-aware host selection policy, using `workers`
// parallel writer goroutines per GOMAXPROCS.
func BenchmarkConnRoutingKey(b *testing.B) {
	const workers = 16

	cluster := createCluster()
	cluster.NumConns = 1
	cluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy())
	session := createSessionFromCluster(cluster, b)
	defer session.Close()

	if err := createTable(session, "CREATE TABLE IF NOT EXISTS routing_key_stress (id int primary key)"); err != nil {
		b.Fatal(err)
	}

	// Shared counter that hands each worker a distinct non-zero multiplier.
	var seed uint64
	writer := func(pb *testing.PB) {
		// Take a per-worker copy once, so the hot loop needs no atomics.
		// (Renamed from `seed` — the original shadowed the outer variable,
		// which was correct but needlessly confusing.)
		workerSeed := atomic.AddUint64(&seed, 1)
		var i uint64
		query := session.Query("insert into routing_key_stress (id) values (?)")
		for pb.Next() {
			if _, err := query.Bind(i * workerSeed).GetRoutingKey(); err != nil {
				// b.Error, not b.Fatal: Fatal must not be called from
				// RunParallel goroutines.
				b.Error(err)
				return
			}
			i++
		}
	}

	b.SetParallelism(workers)
	b.RunParallel(writer)
}
示例4: BenchmarkSprintfString
// BenchmarkSprintfString measures parallel formatting of a plain string
// argument via the %s verb.
func BenchmarkSprintfString(b *testing.B) {
	loop := func(pb *testing.PB) {
		for pb.Next() {
			Sprintf("%s", "hello")
		}
	}
	b.RunParallel(loop)
}
示例5: BenchmarkSprintfPrefixedInt
// BenchmarkSprintfPrefixedInt measures formatting where the verb is
// preceded by a long literal prefix the format scanner must step over.
func BenchmarkSprintfPrefixedInt(b *testing.B) {
	const format = "This is some meaningless prefix text that needs to be scanned %d"
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			Sprintf(format, 6)
		}
	})
}
示例6: BenchmarkSelectNonblock
// BenchmarkSelectNonblock measures non-blocking select statements (those
// with a default case) against both unbuffered and buffered channels, in
// both the receive and send directions.
func BenchmarkSelectNonblock(b *testing.B) {
	unbufRecv := make(chan int)
	unbufSend := make(chan int)
	bufRecv := make(chan int, 1)
	bufSend := make(chan int, 1)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			// Receive from an unbuffered channel: always hits default.
			select {
			case <-unbufRecv:
			default:
			}
			// Send to an unbuffered channel: always hits default.
			select {
			case unbufSend <- 0:
			default:
			}
			// Receive from an empty buffered channel: always hits default.
			select {
			case <-bufRecv:
			default:
			}
			// Send to a buffered channel: succeeds until the buffer fills,
			// then hits default.
			select {
			case bufSend <- 0:
			default:
			}
		}
	})
}
示例7: BenchmarkSprintfPadding
// BenchmarkSprintfPadding measures formatting of a float with an explicit
// field width (padding) in the verb.
func BenchmarkSprintfPadding(b *testing.B) {
	pad := func(pb *testing.PB) {
		for pb.Next() {
			Sprintf("%16f", 1.0)
		}
	}
	b.RunParallel(pad)
}
示例8: BenchmarkHashGet
// BenchmarkHashGet measures consistent-hash connection lookups through a
// ZooKeeper-backed monitor, counting lookups that return no connection.
func BenchmarkHashGet(b *testing.B) {
	mPath := "/test"
	cli := NewZKClient(testServers, time.Second*5, nil)
	defer cli.Close()
	cli.CreatePersistNode(mPath)
	defer cli.DeleteNode(mPath)
	cli.CreateEphemeralNode(mPath + "/127.0.0.1:6379")
	cli.CreateEphemeralNode(mPath + "/127.0.0.1:6380")
	fmt.Println("===============create addr dir===============")
	// Allow time for the ephemeral nodes to propagate before starting.
	time.Sleep(2 * time.Second)

	zkm := NewZKMonitor(testServers, time.Second*5, &testService{}, nil, mPath)
	zkm.Run()
	defer zkm.Close()
	time.Sleep(2 * time.Second)

	hGetter := zkm.HashGetter(nil)
	nilCnt := int32(0)
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		// Per-goroutine key buffer. The original shared one []byte across
		// all RunParallel goroutines and mutated key[0] concurrently — a
		// data race under -race.
		key := []byte{1, 2, 3, 4, 5, 6, 7, 8}
		for pb.Next() {
			key[0] = byte(rand.Int())
			if conn := hGetter.GetConn(key); conn == nil {
				atomic.AddInt32(&nilCnt, 1)
			}
		}
	})
	b.Log("nil connection count:", nilCnt)
}
示例9: runMVCCMerge
// runMVCCMerge merges value into numKeys separate keys.
func runMVCCMerge(value *roachpb.Value, numKeys int, b *testing.B) {
stopper := stop.NewStopper()
defer stopper.Stop()
rocksdb := NewInMem(roachpb.Attributes{}, testCacheSize, stopper)
// Precompute keys so we don't waste time formatting them at each iteration.
keys := make([]roachpb.Key, numKeys)
for i := 0; i < numKeys; i++ {
keys[i] = roachpb.Key(fmt.Sprintf("key-%d", i))
}
b.ResetTimer()
// Use parallelism if specified when test is run.
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
ms := MVCCStats{}
if err := MVCCMerge(rocksdb, &ms, keys[rand.Intn(numKeys)], *value); err != nil {
b.Fatal(err)
}
}
})
// Read values out to force merge.
for _, key := range keys {
val, _, err := MVCCGet(rocksdb, key, roachpb.ZeroTimestamp, true, nil)
if err != nil {
b.Fatal(err)
} else if val == nil {
continue
}
}
b.StopTimer()
}
示例10: runBenchmarkBank
// runBenchmarkBank mirrors the SQL performed by examples/sql_bank, but
// structured as a benchmark for easier usage of the Go performance analysis
// tools like pprof, memprof and trace.
// runBenchmarkBank mirrors the SQL performed by examples/sql_bank, but
// structured as a benchmark for easier usage of the Go performance analysis
// tools like pprof, memprof and trace.
func runBenchmarkBank(b *testing.B, db *sql.DB) {
	if _, err := db.Exec(`CREATE DATABASE IF NOT EXISTS bank`); err != nil {
		b.Fatal(err)
	}
	{
		// Initialize the "accounts" table.
		schema := `
CREATE TABLE IF NOT EXISTS bank.accounts (
id INT PRIMARY KEY,
balance INT NOT NULL
)`
		if _, err := db.Exec(schema); err != nil {
			b.Fatal(err)
		}
		if _, err := db.Exec("TRUNCATE TABLE bank.accounts"); err != nil {
			b.Fatal(err)
		}
		// Seed all accounts (balance 0) with one multi-row INSERT.
		var placeholders bytes.Buffer
		var values []interface{}
		for i := 0; i < *numAccounts; i++ {
			if i > 0 {
				placeholders.WriteString(", ")
			}
			fmt.Fprintf(&placeholders, "($%d, 0)", i+1)
			values = append(values, i)
		}
		stmt := `INSERT INTO bank.accounts (id, balance) VALUES ` + placeholders.String()
		if _, err := db.Exec(stmt, values...); err != nil {
			b.Fatal(err)
		}
	}

	// Hoisted out of the benchmark loop: the statement text never changes,
	// so there is no reason to rebuild the string on every iteration.
	const update = `
UPDATE bank.accounts
SET balance = CASE id WHEN $1 THEN balance-$3 WHEN $2 THEN balance+$3 END
WHERE id IN ($1, $2) AND (SELECT balance >= $3 FROM bank.accounts WHERE id = $1)
`
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			// Pick two distinct accounts and a transfer amount.
			from := rand.Intn(*numAccounts)
			to := rand.Intn(*numAccounts - 1)
			if from == to {
				to = *numAccounts - 1
			}
			amount := rand.Intn(*maxTransfer)
			if _, err := db.Exec(update, from, to, amount); err != nil {
				// Transfers may legitimately fail (e.g. insufficient
				// balance); log at V(1) and keep going.
				if log.V(1) {
					log.Warning(err)
				}
			}
		}
	})
	b.StopTimer()
}
示例11: runClientScan
// runClientScan first creates test data (and resets the benchmarking
// timer). It then performs b.N client scans in increments of numRows
// keys over all of the data, restarting at the beginning of the
// keyspace, as many times as necessary.
func runClientScan(useSSL bool, numRows, numVersions int, b *testing.B) {
const numKeys = 100000
s, db := setupClientBenchData(useSSL, numVersions, numKeys, b)
defer s.Stop()
b.SetBytes(int64(numRows * valueSize))
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
startKeyBuf := append(make([]byte, 0, 64), []byte("key-")...)
endKeyBuf := append(make([]byte, 0, 64), []byte("key-")...)
for pb.Next() {
// Choose a random key to start scan.
keyIdx := rand.Int31n(int32(numKeys - numRows))
startKey := roachpb.Key(encoding.EncodeUvarintAscending(
startKeyBuf, uint64(keyIdx)))
endKey := roachpb.Key(encoding.EncodeUvarintAscending(
endKeyBuf, uint64(keyIdx)+uint64(numRows)))
rows, pErr := db.Scan(startKey, endKey, int64(numRows))
if pErr != nil {
b.Fatalf("failed scan: %s", pErr)
}
if len(rows) != numRows {
b.Fatalf("failed to scan: %d != %d", len(rows), numRows)
}
}
})
b.StopTimer()
}
示例12: runMVCCGet
// runMVCCGet first creates test data (and resets the benchmarking
// timer). It then performs b.N MVCCGets.
func runMVCCGet(numVersions int, b *testing.B) {
// Use the same number of keys for all of the mvcc get
// benchmarks. Using a different number of keys per test gives
// preferential treatment to tests with fewer keys. Note that the
// datasets all fit in cache and the cache is pre-warmed.
const numKeys = 100000
rocksdb := setupMVCCScanData(numVersions, numKeys, b)
defer rocksdb.Close()
prewarmCache(rocksdb)
b.SetBytes(1024)
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
keyBuf := append(make([]byte, 0, 64), []byte("key-")...)
for pb.Next() {
// Choose a random key to retrieve.
keyIdx := rand.Int31n(int32(numKeys))
key := proto.Key(encoding.EncodeUvarint(keyBuf[0:4], uint64(keyIdx)))
walltime := int64(5 * (rand.Int31n(int32(numVersions)) + 1))
ts := makeTS(walltime, 0)
if v, _, err := MVCCGet(rocksdb, key, ts, true, nil); err != nil {
b.Fatalf("failed get: %s", err)
} else if len(v.Bytes) != 1024 {
b.Fatalf("unexpected value size: %d", len(v.Bytes))
}
}
})
b.StopTimer()
}
示例13: runMVCCScan
// runMVCCScan first creates test data (and resets the benchmarking
// timer). It then performs b.N MVCCScans in increments of numRows
// keys over all of the data in the rocksdb instance, restarting at
// the beginning of the keyspace, as many times as necessary.
func runMVCCScan(numRows, numVersions int, b *testing.B) {
// Use the same number of keys for all of the mvcc scan
// benchmarks. Using a different number of keys per test gives
// preferential treatment to tests with fewer keys. Note that the
// datasets all fit in cache and the cache is pre-warmed.
const numKeys = 100000
rocksdb := setupMVCCScanData(numVersions, numKeys, b)
defer rocksdb.Close()
prewarmCache(rocksdb)
b.SetBytes(int64(numRows * 1024))
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
keyBuf := append(make([]byte, 0, 64), []byte("key-")...)
for pb.Next() {
// Choose a random key to start scan.
keyIdx := rand.Int31n(int32(numKeys - numRows))
startKey := proto.Key(encoding.EncodeUvarint(keyBuf[0:4], uint64(keyIdx)))
walltime := int64(5 * (rand.Int31n(int32(numVersions)) + 1))
ts := makeTS(walltime, 0)
kvs, _, err := MVCCScan(rocksdb, startKey, proto.KeyMax, int64(numRows), ts, true, nil)
if err != nil {
b.Fatalf("failed scan: %s", err)
}
if len(kvs) != numRows {
b.Fatalf("failed to scan: %d != %d", len(kvs), numRows)
}
}
})
b.StopTimer()
}
示例14: BenchmarkManyConcurrentQueries
// BenchmarkManyConcurrentQueries stresses a single prepared statement from
// many goroutines to surface lock contention in the driver/connection pool.
func BenchmarkManyConcurrentQueries(b *testing.B) {
	b.ReportAllocs()
	// To see lock contention in Go 1.4, 16~ cores and 128~ goroutines are required.
	const parallelism = 16

	db := newTestDB(b, "magicquery")
	defer closeDB(b, db)
	db.SetMaxIdleConns(runtime.GOMAXPROCS(0) * parallelism)

	stmt, err := db.Prepare("SELECT|magicquery|op|op=?,millis=?")
	if err != nil {
		b.Fatal(err)
	}
	defer stmt.Close()

	b.SetParallelism(parallelism)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			rows, qerr := stmt.Query("sleep", 1)
			if qerr != nil {
				b.Error(qerr)
				return
			}
			rows.Close()
		}
	})
}
示例15: BenchmarkAddAndQueryPost
// BenchmarkAddAndQueryPost measures a mixed workload against a PostIndex:
// roughly 1 in 5 parallel iterations adds a new post, the rest run a query.
func BenchmarkAddAndQueryPost(b *testing.B) {
	// Pre-build posts and queries so we're not measuring that.
	// (Renamed from `rand`: the local shadowed the math/rand package, and a
	// *rand.Rand is not safe for concurrent use — the original called it
	// from inside RunParallel goroutines, a data race.)
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	idx := &PostIndex{}
	posts := createPosts(aliceChain, 100000, rng) // Large number of posts to query
	for _, v := range posts {
		idx.AddPost(v.id, v.words)
	}

	posts = createPosts(aliceChain, b.N, rng) // New posts!
	queries := make([]string, b.N)
	for i := 0; i < len(queries); i++ {
		ql := rng.Intn(4) + 1
		t := aliceChain.Generate(ql, rng)
		w := splitToWords(t)
		queries[i] = randomQuery(w, rng)
	}

	var index int32 = -1 // Count up to N but atomically
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			i := atomic.AddInt32(&index, 1)
			// Package-level rand is safe for concurrent use, unlike the
			// rng built during setup above.
			if rand.Intn(5) == 0 {
				p := posts[i]
				idx.AddPost(p.id, p.words)
			} else {
				q := queries[i]
				idx.QueryPosts(q, 100)
			}
		}
	})
}