This article collects typical usage examples of the Cluster.Assert method from the Golang package github.com/cockroachdb/cockroach/pkg/acceptance/cluster. If you are wondering what Cluster.Assert does, or how and where to call it, the curated examples below should help; they also illustrate the surrounding cluster.Cluster interface.
Two code examples of Cluster.Assert are shown below.
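Before the full examples, here is a minimal, self-contained sketch of the typical call pattern, assuming the acceptance test harness supplies the cluster.Cluster and *testing.T values. The helper name checkClusterHealth and the package name are hypothetical; the two calls themselves (c.Assert and cluster.Consistent) are exactly those used in the examples below.

package acceptance

import (
	"context"
	"testing"

	"github.com/cockroachdb/cockroach/pkg/acceptance/cluster"
)

// checkClusterHealth is a hypothetical helper showing the typical pairing:
// Assert fails the test if any node has died unexpectedly, and Consistent
// runs a replica consistency check through the node at the given index.
func checkClusterHealth(ctx context.Context, t *testing.T, c cluster.Cluster) {
	c.Assert(ctx, t)
	if err := cluster.Consistent(ctx, c, 0); err != nil {
		t.Fatal(err)
	}
}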
Example 1: testPutInner
func testPutInner(ctx context.Context, t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	db, err := c.NewClient(ctx, 0)
	if err != nil {
		t.Fatal(err)
	}

	errs := make(chan error, c.NumNodes())
	start := timeutil.Now()
	deadline := start.Add(cfg.Duration)
	var count int64
	// Start one writer goroutine per node, each issuing Puts with random
	// values until the deadline passes.
	for i := 0; i < c.NumNodes(); i++ {
		go func() {
			r, _ := randutil.NewPseudoRand()
			value := randutil.RandBytes(r, 8192)
			for timeutil.Now().Before(deadline) {
				k := atomic.AddInt64(&count, 1)
				v := value[:r.Intn(len(value))]
				if err := db.Put(ctx, fmt.Sprintf("%08d", k), v); err != nil {
					errs <- err
					return
				}
			}
			errs <- nil
		}()
	}

	// Wait for all writers to finish. stopper is a package-level stopper
	// provided by the surrounding test harness.
	for i := 0; i < c.NumNodes(); {
		baseCount := atomic.LoadInt64(&count)
		select {
		case <-stopper.ShouldStop():
			t.Fatalf("interrupted")
		case err := <-errs:
			if err != nil {
				t.Fatal(err)
			}
			i++
		case <-time.After(1 * time.Second):
			// Periodically print out progress so that we know the test is
			// still running, and verify cluster health and consistency.
			loadedCount := atomic.LoadInt64(&count)
			log.Infof(ctx, "%d (%d/s)", loadedCount, loadedCount-baseCount)
			c.Assert(ctx, t)
			if err := cluster.Consistent(ctx, c, 0); err != nil {
				t.Fatal(err)
			}
		}
	}
	elapsed := timeutil.Since(start)
	log.Infof(ctx, "%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Example 2: chaosMonkey
// chaosMonkey picks a set of nodes and restarts them. If stopClients is set,
// all the clients are locked before the nodes are restarted.
func chaosMonkey(
	ctx context.Context,
	state *testState,
	c cluster.Cluster,
	stopClients bool,
	pickNodes func() []int,
	consistentIdx int,
) {
	defer close(state.teardown)
	for curRound := uint64(1); !state.done(); curRound++ {
		atomic.StoreUint64(&state.monkeyIteration, curRound)
		select {
		case <-stopper.ShouldStop():
			return
		default:
		}

		// Pick nodes to be restarted.
		nodes := pickNodes()
		if stopClients {
			// Prevent all clients from writing while nodes are being restarted.
			for i := 0; i < len(state.clients); i++ {
				state.clients[i].Lock()
			}
		}
		log.Infof(ctx, "round %d: restarting nodes %v", curRound, nodes)
	restart:
		for _, i := range nodes {
			// Two early exit conditions. Note that a plain break inside the
			// select would only exit the select statement, so a labeled break
			// is needed to leave the restart loop.
			select {
			case <-stopper.ShouldStop():
				break restart
			default:
			}
			if state.done() {
				break
			}
			log.Infof(ctx, "round %d: restarting %d", curRound, i)
			if err := c.Kill(ctx, i); err != nil {
				state.t.Error(err)
			}
			if err := c.Restart(ctx, i); err != nil {
				state.t.Error(err)
			}
			if stopClients {
				// Reinitialize the client talking to the restarted node.
				state.initClient(ctx, state.t, c, i)
			}
		}
		if stopClients {
			for i := 0; i < len(state.clients); i++ {
				state.clients[i].Unlock()
			}
		}

		// Record the per-client write counts before recovery, so progress can
		// be detected below.
		preCount := state.counts()
		madeProgress := func() bool {
			newCounts := state.counts()
			for i := range newCounts {
				if newCounts[i] > preCount[i] {
					return true
				}
			}
			return false
		}
		// Sleep until at least one client is writing successfully.
		log.Warningf(ctx, "round %d: monkey sleeping while cluster recovers...", curRound)
		for !state.done() && !madeProgress() {
			time.Sleep(time.Second)
		}
		c.Assert(ctx, state.t)
		if err := cluster.Consistent(ctx, c, consistentIdx); err != nil {
			state.t.Error(err)
		}
		log.Warningf(ctx, "round %d: cluster recovered", curRound)
	}
}