This article collects typical usage examples of the Cluster.NewClient method from the Go package github.com/cockroachdb/cockroach/acceptance/cluster. If you have been wondering what Cluster.NewClient does and how to call it, the curated code samples below should help. You can also explore further usage examples of the containing type, github.com/cockroachdb/cockroach/acceptance/cluster.Cluster.
The following presents 7 code examples of the Cluster.NewClient method, sorted by popularity by default.
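Before the full test functions, here is a minimal sketch of the pattern every example below follows: Cluster.NewClient returns a KV client together with a stopper that owns the client's resources, and callers are expected to defer stopper.Stop(). The helper name runSmokeCheck and the key it writes are hypothetical, used only for illustration; imports are omitted to match the excerpt style of the examples below.
func runSmokeCheck(t *testing.T, c cluster.Cluster) {
	// Connect a KV client to node 0; the returned stopper owns the
	// client's resources and must be stopped when we are done.
	db, dbStopper := c.NewClient(t, 0)
	defer dbStopper.Stop()

	// The client behaves like the examples' db handles below; a simple
	// Put is enough to verify connectivity. "smoke-key" is a made-up key.
	if err := db.Put("smoke-key", 1); err != nil {
		t.Fatal(err)
	}
}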
Example 1: testNodeRestartInner
func testNodeRestartInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
num := c.NumNodes()
if num <= 0 {
t.Fatalf("%d nodes in cluster", num)
}
// A single client, connected to the last node.
initBank(t, c.PGUrl(0))
start := timeutil.Now()
state := testState{
t: t,
errChan: make(chan error, 1),
teardown: make(chan struct{}),
deadline: start.Add(cfg.Duration),
clients: make([]testClient, 1),
}
client := &state.clients[0]
client.Lock()
client.db = makePGClient(t, c.PGUrl(num-1))
client.Unlock()
go transferMoneyLoop(0, &state, *numAccounts, *maxTransfer)
defer func() {
<-state.teardown
}()
// Chaos monkey.
rnd, seed := randutil.NewPseudoRand()
log.Warningf("monkey starts (seed %d)", seed)
pickNodes := func() []int {
return []int{rnd.Intn(num - 1)}
}
go chaosMonkey(&state, c, false, pickNodes)
waitClientsStop(1, &state, cfg.Stall)
// Verify accounts.
verifyAccounts(t, client)
elapsed := time.Since(start)
count := atomic.LoadUint64(&client.count)
log.Infof("%d %.1f/sec", count, float64(count)/elapsed.Seconds())
kvClient, kvStopper := c.NewClient(t, num-1)
defer kvStopper.Stop()
if pErr := kvClient.CheckConsistency(keys.TableDataMin, keys.TableDataMax); pErr != nil {
// TODO(.*): change back to t.Fatal after #5051.
log.Error(pErr)
}
}
Example 2: testPutInner
func testPutInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
db, dbStopper := c.NewClient(t, 0)
defer dbStopper.Stop()
errs := make(chan error, c.NumNodes())
start := timeutil.Now()
deadline := start.Add(cfg.Duration)
var count int64
for i := 0; i < c.NumNodes(); i++ {
go func() {
r, _ := randutil.NewPseudoRand()
value := randutil.RandBytes(r, 8192)
for timeutil.Now().Before(deadline) {
k := atomic.AddInt64(&count, 1)
v := value[:r.Intn(len(value))]
if err := db.Put(fmt.Sprintf("%08d", k), v); err != nil {
errs <- err
return
}
}
errs <- nil
}()
}
for i := 0; i < c.NumNodes(); {
baseCount := atomic.LoadInt64(&count)
select {
case <-stopper:
t.Fatalf("interrupted")
case err := <-errs:
if err != nil {
t.Fatal(err)
}
i++
case <-time.After(1 * time.Second):
// Periodically print out progress so that we know the test is still
// running.
loadedCount := atomic.LoadInt64(&count)
log.Infof(context.Background(), "%d (%d/s)", loadedCount, loadedCount-baseCount)
c.Assert(t)
cluster.Consistent(t, c)
}
}
elapsed := timeutil.Since(start)
log.Infof(context.Background(), "%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Example 3: checkRangeReplication
func checkRangeReplication(t *testing.T, c cluster.Cluster, d time.Duration) {
if c.NumNodes() < 1 {
// Looks silly, but we actually start zero-node clusters in the
// reference tests.
t.Log("replication test is a no-op for empty cluster")
return
}
wantedReplicas := 3
if c.NumNodes() < 3 {
wantedReplicas = c.NumNodes()
}
log.Infof(context.Background(), "waiting for first range to have %d replicas", wantedReplicas)
util.SucceedsSoon(t, func() error {
// Reconnect on every iteration; gRPC will eagerly tank the connection
// on transport errors. Always talk to node 0 because it's guaranteed
// to exist.
client, dbStopper := c.NewClient(t, 0)
defer dbStopper.Stop()
select {
case <-stopper:
t.Fatalf("interrupted")
return nil
case <-time.After(1 * time.Second):
}
foundReplicas, err := countRangeReplicas(client)
if err != nil {
return err
}
if log.V(1) {
log.Infof(context.Background(), "found %d replicas", foundReplicas)
}
if foundReplicas >= wantedReplicas {
return nil
}
return fmt.Errorf("expected %d replicas, only found %d", wantedReplicas, foundReplicas)
})
log.Infof(context.Background(), "found %d replicas", wantedReplicas)
}
Example 4: checkRangeReplication
func checkRangeReplication(t *testing.T, c cluster.Cluster, d time.Duration) {
// Always talk to node 0.
client, dbStopper := c.NewClient(t, 0)
defer dbStopper.Stop()
wantedReplicas := 3
if c.NumNodes() < 3 {
wantedReplicas = c.NumNodes()
}
log.Infof("waiting for first range to have %d replicas", wantedReplicas)
util.SucceedsSoon(t, func() error {
select {
case <-stopper:
t.Fatalf("interrupted")
return nil
case <-time.After(1 * time.Second):
}
foundReplicas, err := countRangeReplicas(client)
if err != nil {
return err
}
if log.V(1) {
log.Infof("found %d replicas", foundReplicas)
}
if foundReplicas >= wantedReplicas {
return nil
}
return fmt.Errorf("expected %d replicas, only found %d", wantedReplicas, foundReplicas)
})
log.Infof("found %d replicas", wantedReplicas)
}
Example 5: testGossipRestartInner
func testGossipRestartInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
// This already replicates the first range (in the local setup).
// The replication of the first range is important: as long as the
// first range only exists on one node, that node can trivially
// acquire the range lease. Once the range is replicated, however,
// nodes must be able to discover each other over gossip before the
// lease can be acquired.
num := c.NumNodes()
deadline := timeutil.Now().Add(cfg.Duration)
waitTime := longWaitTime
if cfg.Duration < waitTime {
waitTime = shortWaitTime
}
for timeutil.Now().Before(deadline) {
log.Infof(context.Background(), "waiting for initial gossip connections")
checkGossip(t, c, waitTime, hasPeers(num))
checkGossip(t, c, waitTime, hasClusterID)
checkGossip(t, c, waitTime, hasSentinel)
log.Infof(context.Background(), "killing all nodes")
for i := 0; i < num; i++ {
if err := c.Kill(i); err != nil {
t.Fatal(err)
}
}
log.Infof(context.Background(), "restarting all nodes")
for i := 0; i < num; i++ {
if err := c.Restart(i); err != nil {
t.Fatal(err)
}
}
log.Infof(context.Background(), "waiting for gossip to be connected")
checkGossip(t, c, waitTime, hasPeers(num))
checkGossip(t, c, waitTime, hasClusterID)
checkGossip(t, c, waitTime, hasSentinel)
for i := 0; i < num; i++ {
db, dbStopper := c.NewClient(t, i)
if i == 0 {
if err := db.Del("count"); err != nil {
t.Fatal(err)
}
}
var kv client.KeyValue
if err := db.Txn(func(txn *client.Txn) error {
var err error
kv, err = txn.Inc("count", 1)
return err
}); err != nil {
t.Fatal(err)
} else if v := kv.ValueInt(); v != int64(i+1) {
t.Fatalf("unexpected value %d for write #%d (expected %d)", v, i, i+1)
}
dbStopper.Stop()
}
}
}
Example 6: testSingleKeyInner
func testSingleKeyInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
num := c.NumNodes()
// Initialize the value for our test key to zero.
const key = "test-key"
initDB, initDBStopper := c.NewClient(t, 0)
defer initDBStopper.Stop()
if err := initDB.Put(key, 0); err != nil {
t.Fatal(err)
}
type result struct {
err error
maxLatency time.Duration
}
resultCh := make(chan result, num)
deadline := timeutil.Now().Add(cfg.Duration)
var expected int64
// Start up num workers each reading and writing the same
// key. Each worker is configured to talk to a different node in the
// cluster.
for i := 0; i < num; i++ {
db, dbStopper := c.NewClient(t, i)
defer dbStopper.Stop()
go func() {
var r result
for timeutil.Now().Before(deadline) {
start := timeutil.Now()
err := db.Txn(func(txn *client.Txn) error {
minExp := atomic.LoadInt64(&expected)
r, err := txn.Get(key)
if err != nil {
return err
}
b := txn.NewBatch()
v := r.ValueInt()
b.Put(key, v+1)
err = txn.CommitInBatch(b)
// Atomic updates after the fact mean that we should read
// exp or larger (since concurrent writers might have
// committed but not yet performed their atomic update).
if err == nil && v < minExp {
return util.Errorf("unexpected read: %d, expected >= %d", v, minExp)
}
return err
})
if err != nil {
resultCh <- result{err: err}
return
}
atomic.AddInt64(&expected, 1)
latency := timeutil.Since(start)
if r.maxLatency < latency {
r.maxLatency = latency
}
}
resultCh <- r
}()
}
// Verify that none of the workers encountered an error.
var results []result
for len(results) < num {
select {
case <-stopper:
t.Fatalf("interrupted")
case r := <-resultCh:
if r.err != nil {
t.Fatal(r.err)
}
results = append(results, r)
case <-time.After(1 * time.Second):
// Periodically print out progress so that we know the test is still
// running.
log.Infof("%d", atomic.LoadInt64(&expected))
}
}
// Verify the resulting value stored at the key is what we expect.
r, err := initDB.Get(key)
if err != nil {
t.Fatal(err)
}
v := r.ValueInt()
if expected != v {
t.Fatalf("expected %d, but found %d", expected, v)
}
var maxLatency []time.Duration
for _, r := range results {
maxLatency = append(maxLatency, r.maxLatency)
}
log.Infof("%d increments: %s", v, maxLatency)
}
Example 7: testRaftUpdateInner
func testRaftUpdateInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
minAffected := int64(server.ExpectedInitialRangeCount())
mustPost := func(freeze bool) server.ClusterFreezeResponse {
reply, err := postFreeze(c, freeze)
if err != nil {
t.Fatal(util.ErrorfSkipFrames(1, "%v", err))
}
return reply
}
if reply := mustPost(false); reply.RangesAffected != 0 {
t.Fatalf("expected initial unfreeze to affect no ranges, got %d", reply.RangesAffected)
}
if reply := mustPost(true); reply.RangesAffected < minAffected {
t.Fatalf("expected >=%d frozen ranges, got %d", minAffected, reply.RangesAffected)
}
if reply := mustPost(true); reply.RangesAffected != 0 {
t.Fatalf("expected second freeze to affect no ranges, got %d", reply.RangesAffected)
}
if reply := mustPost(false); reply.RangesAffected < minAffected {
t.Fatalf("expected >=%d thawed ranges, got %d", minAffected, reply.RangesAffected)
}
num := c.NumNodes()
if num < 3 {
t.Skip("skipping remainder of test; needs at least 3 nodes")
}
// Kill the last node.
if err := c.Kill(num - 1); err != nil {
t.Fatal(err)
}
// Attempt to freeze should get stuck (since it does not get confirmation
// of the last node receiving the freeze command).
if reply, err := postFreeze(c, true); !testutils.IsError(err, "timed out waiting for Range|Timeout exceeded while") {
t.Fatalf("expected timeout, got %v: %v", err, reply)
}
// Shut down the remaining nodes and restart them.
for i := 0; i < num-1; i++ {
if err := c.Kill(i); err != nil {
t.Fatal(err)
}
}
for i := 0; i < num; i++ {
if err := c.Restart(i); err != nil {
t.Fatal(err)
}
}
// The cluster should now be fully operational (at least after waiting
// a little bit) since each node tries to unfreeze everything when it
// starts.
if err := util.RetryForDuration(time.Minute, func() error {
// TODO(tschottdorf): moving the client creation outside of the retry
// loop will break the test with the following message:
//
// client/rpc_sender.go:61: roachpb.Batch RPC failed as client
// connection was closed
//
// Perhaps the cluster updates the address too late after restarting
// the node.
db, dbStopper := c.NewClient(t, 0)
defer dbStopper.Stop()
_, err := db.Scan(keys.LocalMax, roachpb.KeyMax, 0)
if err != nil {
log.Info(err)
}
return err
}); err != nil {
t.Fatal(err)
}
// Unfreezing again should be a no-op.
if reply, err := postFreeze(c, false); err != nil {
t.Fatal(err)
} else if reply.RangesAffected > 0 {
t.Fatalf("still %d frozen ranges", reply.RangesAffected)
}
}