This page collects typical usage examples of the Go method github.com/cockroachdb/cockroach/acceptance/cluster.Cluster.NumNodes. If you are wondering what Cluster.NumNodes does and how to call it, the examples curated below should help; you can also look at the containing type, github.com/cockroachdb/cockroach/acceptance/cluster.Cluster, for further usage examples.
The 15 code examples of Cluster.NumNodes shown below are ordered by popularity.
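Before the examples, here is a minimal sketch, not taken from any of the examples below, of the pattern nearly all of them share: node indices run from 0 to c.NumNodes()-1, so NumNodes serves as the loop bound when visiting every node. The helper name checkAllStatusEndpoints is made up for illustration; getJSON, c.URL, and the /_status/details/local endpoint are the same ones that appear in several examples further down.

// checkAllStatusEndpoints is a hypothetical helper illustrating the common
// Cluster.NumNodes pattern: iterate over node indices 0..NumNodes()-1 and
// touch each node once.
func checkAllStatusEndpoints(t *testing.T, c cluster.Cluster) {
	for i := 0; i < c.NumNodes(); i++ {
		var details map[string]interface{}
		// c.URL(i) is the HTTP base address of node i; getJSON is the same
		// JSON-fetching helper used by several of the examples below.
		if err := getJSON(c.URL(i), "/_status/details/local", &details); err != nil {
			t.Errorf("node %d: %s", i, err)
		}
	}
}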
Example 1: cutNetwork
// cutNetwork partitions the cluster into the given node groups via iptables
// rules, waits for closer to be closed, and then clears the rules on every node.
func cutNetwork(t *testing.T, c cluster.Cluster, closer <-chan struct{}, partitions ...[]int) {
	addrs, addrsToNode := mustGetHosts(t, c)
	ipPartitions := make([][]iptables.IP, 0, len(partitions))
	for _, partition := range partitions {
		ipPartition := make([]iptables.IP, 0, len(partition))
		for _, nodeIndex := range partition {
			ipPartition = append(ipPartition, addrs[nodeIndex])
		}
		ipPartitions = append(ipPartitions, ipPartition)
	}
	log.Warningf("partitioning: %v (%v)", partitions, ipPartitions)
	for host, cmds := range iptables.Rules(iptables.Bidirectional(ipPartitions...)) {
		for _, cmd := range cmds {
			if err := c.ExecRoot(addrsToNode[host], cmd); err != nil {
				t.Fatal(err)
			}
		}
	}
	<-closer
	for i := 0; i < c.NumNodes(); i++ {
		for _, cmd := range iptables.Reset() {
			if err := c.ExecRoot(i, cmd); err != nil {
				t.Fatal(err)
			}
		}
	}
	log.Warningf("resolved all partitions")
}
Example 2: checkRangeReplication
func checkRangeReplication(t *testing.T, c cluster.Cluster, d time.Duration) {
	// Always talk to node 0.
	client, dbStopper := makeClient(t, c.ConnString(0))
	defer dbStopper.Stop()

	wantedReplicas := 3
	if c.NumNodes() < 3 {
		wantedReplicas = c.NumNodes()
	}

	log.Infof("waiting for first range to have %d replicas", wantedReplicas)

	util.SucceedsWithin(t, d, func() error {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
			return nil
		case <-time.After(1 * time.Second):
		}

		foundReplicas, err := countRangeReplicas(client)
		if err != nil {
			return err
		}
		if log.V(1) {
			log.Infof("found %d replicas", foundReplicas)
		}
		if foundReplicas >= wantedReplicas {
			return nil
		}
		return fmt.Errorf("expected %d replicas, only found %d", wantedReplicas, foundReplicas)
	})
}
Example 3: checkGossip
// checkGossip fetches the gossip infoStore from each node and invokes the given
// function. The test passes if the function returns 0 for every node,
// retrying for up to the given duration.
func checkGossip(t *testing.T, c cluster.Cluster, d time.Duration, f checkGossipFunc) {
	util.SucceedsWithin(t, d, func() error {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
			return nil
		case <-time.After(1 * time.Second):
		}

		for i := 0; i < c.NumNodes(); i++ {
			var m map[string]interface{}
			if err := getJSON(c.URL(i), "/_status/gossip/local", &m); err != nil {
				return err
			}
			infos, ok := m["infos"].(map[string]interface{})
			if !ok {
				return errors.New("no infos yet")
			}
			if err := f(infos); err != nil {
				return util.Errorf("node %d: %s", i, err)
			}
		}
		return nil
	})
}
Example 4: testGossipPeeringsInner
func testGossipPeeringsInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	num := c.NumNodes()

	deadline := timeutil.Now().Add(cfg.Duration)

	waitTime := longWaitTime
	if cfg.Duration < waitTime {
		waitTime = shortWaitTime
	}

	for timeutil.Now().Before(deadline) {
		checkGossip(t, c, waitTime, hasPeers(num))

		// Restart the first node.
		log.Infof(context.Background(), "restarting node 0")
		if err := c.Restart(0); err != nil {
			t.Fatal(err)
		}
		checkGossip(t, c, waitTime, hasPeers(num))

		// Restart another node (if there is one).
		var pickedNode int
		if num > 1 {
			pickedNode = rand.Intn(num-1) + 1
		}
		log.Infof(context.Background(), "restarting node %d", pickedNode)
		if err := c.Restart(pickedNode); err != nil {
			t.Fatal(err)
		}
		checkGossip(t, c, waitTime, hasPeers(num))
	}
}
Example 5: testBuildInfoInner
func testBuildInfoInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	checkGossip(t, c, 20*time.Second, hasPeers(c.NumNodes()))

	var details server.DetailsResponse
	util.SucceedsSoon(t, func() error {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
		default:
		}
		return util.GetJSON(cluster.HTTPClient, c.URL(0)+"/_status/details/local", &details)
	})

	bi := details.BuildInfo
	testData := map[string]string{
		"go_version":   bi.GoVersion,
		"tag":          bi.Tag,
		"time":         bi.Time,
		"dependencies": bi.Dependencies,
	}
	for key, val := range testData {
		if val == "" {
			t.Errorf("build info not set for \"%s\"", key)
		}
	}
}
Example 6: testBuildInfoInner
func testBuildInfoInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	checkGossip(t, c, 20*time.Second, hasPeers(c.NumNodes()))

	util.SucceedsSoon(t, func() error {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
			return nil
		default:
		}

		var r struct {
			BuildInfo map[string]string
		}
		if err := getJSON(c.URL(0), "/_status/details/local", &r); err != nil {
			return err
		}
		for _, key := range []string{"goVersion", "tag", "time", "dependencies"} {
			if val, ok := r.BuildInfo[key]; !ok {
				t.Errorf("build info missing for \"%s\"", key)
			} else if val == "" {
				t.Errorf("build info not set for \"%s\"", key)
			}
		}
		return nil
	})
}
Example 7: checkGossip
// checkGossip fetches the gossip infoStore from each node and invokes the given
// function. The test passes if the function returns 0 for every node,
// retrying for up to the given duration.
func checkGossip(t *testing.T, c cluster.Cluster, d time.Duration, f checkGossipFunc) {
	err := util.RetryForDuration(d, func() error {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
			return nil
		case <-time.After(1 * time.Second):
		}

		var infoStatus gossip.InfoStatus
		for i := 0; i < c.NumNodes(); i++ {
			if err := util.GetJSON(cluster.HTTPClient, c.URL(i)+"/_status/gossip/local", &infoStatus); err != nil {
				return err
			}
			if err := f(infoStatus.Infos); err != nil {
				return errors.Errorf("node %d: %s", i, err)
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(errors.Errorf("condition failed to evaluate within %s: %s", d, err))
	}
}
Example 8: restoreNetwork
// restoreNetwork resets the iptables rules on every node, undoing any
// partitions, and returns all errors encountered along the way.
func restoreNetwork(t *testing.T, c cluster.Cluster) []error {
	var errs []error
	for i := 0; i < c.NumNodes(); i++ {
		for _, cmd := range iptables.Reset() {
			if err := c.ExecRoot(i, cmd); err != nil {
				errs = append(errs, err)
			}
		}
	}
	return errs
}
Example 9: mustGetHosts
// mustGetHosts returns the internal IP of every node along with a reverse
// map from IP back to node index.
func mustGetHosts(t *testing.T, c cluster.Cluster) (
	[]iptables.IP, map[iptables.IP]int,
) {
	var addrs []iptables.IP
	addrsToNode := make(map[iptables.IP]int)
	for i := 0; i < c.NumNodes(); i++ {
		addr := iptables.IP(c.InternalIP(i).String())
		addrsToNode[addr] = i
		addrs = append(addrs, addr)
	}
	return addrs, addrsToNode
}
Example 10: testNodeRestartInner
func testNodeRestartInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	num := c.NumNodes()
	if num <= 0 {
		t.Fatalf("%d nodes in cluster", num)
	}

	// One client for each node.
	initBank(t, c.PGUrl(0))

	start := timeutil.Now()
	state := testState{
		t:        t,
		errChan:  make(chan error, 1),
		teardown: make(chan struct{}),
		deadline: start.Add(cfg.Duration),
		clients:  make([]testClient, 1),
	}
	client := &state.clients[0]
	client.Lock()
	client.db = makePGClient(t, c.PGUrl(num-1))
	client.Unlock()
	go transferMoneyLoop(0, &state, *numAccounts, *maxTransfer)

	defer func() {
		<-state.teardown
	}()

	// Chaos monkey.
	rnd, seed := randutil.NewPseudoRand()
	log.Warningf("monkey starts (seed %d)", seed)
	pickNodes := func() []int {
		return []int{rnd.Intn(num - 1)}
	}
	go chaosMonkey(&state, c, false, pickNodes)

	waitClientsStop(1, &state, cfg.Stall)

	// Verify accounts.
	verifyAccounts(t, client)

	elapsed := time.Since(start)
	count := atomic.LoadUint64(&client.count)
	log.Infof("%d %.1f/sec", count, float64(count)/elapsed.Seconds())

	kvClient, kvStopper := c.NewClient(t, num-1)
	defer kvStopper.Stop()
	if pErr := kvClient.CheckConsistency(keys.TableDataMin, keys.TableDataMax); pErr != nil {
		// TODO(.*): change back to t.Fatal after #5051.
		log.Error(pErr)
	}
}
Example 11: testClusterRecoveryInner
func testClusterRecoveryInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	num := c.NumNodes()
	if num <= 0 {
		t.Fatalf("%d nodes in cluster", num)
	}

	// One client for each node.
	initBank(t, c.PGUrl(0))

	start := timeutil.Now()
	state := testState{
		t:        t,
		errChan:  make(chan error, num),
		teardown: make(chan struct{}),
		deadline: start.Add(cfg.Duration),
		clients:  make([]testClient, num),
	}

	for i := 0; i < num; i++ {
		state.clients[i].Lock()
		state.initClient(t, c, i)
		state.clients[i].Unlock()
		go transferMoneyLoop(i, &state, *numAccounts, *maxTransfer)
	}

	defer func() {
		<-state.teardown
	}()

	// Chaos monkey.
	rnd, seed := randutil.NewPseudoRand()
	log.Warningf("monkey starts (seed %d)", seed)
	pickNodes := func() []int {
		return rnd.Perm(num)[:rnd.Intn(num)+1]
	}
	go chaosMonkey(&state, c, true, pickNodes)

	waitClientsStop(num, &state, cfg.Stall)

	// Verify accounts.
	verifyAccounts(t, &state.clients[0])

	elapsed := time.Since(start)
	var count uint64
	counts := state.counts()
	for _, c := range counts {
		count += c
	}
	log.Infof("%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Example 12: testNodeRestartInner
func testNodeRestartInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	num := c.NumNodes()
	if minNum := 3; num < minNum {
		t.Skipf("need at least %d nodes, got %d", minNum, num)
	}

	// One client for each node.
	initBank(t, c.PGUrl(0))

	start := timeutil.Now()
	state := testState{
		t:        t,
		errChan:  make(chan error, 1),
		teardown: make(chan struct{}),
		deadline: start.Add(cfg.Duration),
		clients:  make([]testClient, 1),
	}
	client := &state.clients[0]
	client.Lock()
	client.db = makePGClient(t, c.PGUrl(num-1))
	client.Unlock()
	go transferMoneyLoop(0, &state, *numAccounts, *maxTransfer)

	defer func() {
		<-state.teardown
	}()

	// Chaos monkey.
	rnd, seed := randutil.NewPseudoRand()
	log.Warningf(context.Background(), "monkey starts (seed %d)", seed)
	pickNodes := func() []int {
		return []int{rnd.Intn(num - 1)}
	}
	go chaosMonkey(&state, c, false, pickNodes)

	waitClientsStop(1, &state, stall)

	// Verify accounts.
	verifyAccounts(t, client)

	elapsed := timeutil.Since(start)
	count := atomic.LoadUint64(&client.count)
	log.Infof(context.Background(), "%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Example 13: testAdminLossOfQuorumInner
func testAdminLossOfQuorumInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	if c.NumNodes() < 2 {
		t.Logf("skipping test %s because given cluster has too few nodes", cfg.Name)
		return
	}

	// Get the ids for each node.
	idMap := make(map[int]string)
	for i := 0; i < c.NumNodes(); i++ {
		var detail details
		if err := getJSON(c.URL(i), "/_status/details/local", &detail); err != nil {
			t.Fatal(err)
		}
		idMap[i] = detail.NodeID.String()
	}

	// Leave only the first node alive.
	for i := 1; i < c.NumNodes(); i++ {
		if err := c.Kill(i); err != nil {
			t.Fatal(err)
		}
	}

	// Retrieve node statuses.
	var nodeStatuses interface{}
	if err := getJSON(c.URL(0), "/_status/nodes/", &nodeStatuses); err != nil {
		t.Fatal(err)
	}
	for i := 0; i < c.NumNodes(); i++ {
		var nodeStatus interface{}
		url := fmt.Sprintf("/_status/nodes/%s", idMap[i])
		if err := getJSON(c.URL(0), url, &nodeStatus); err != nil {
			t.Fatal(err)
		}
	}

	// Retrieve time-series data.
	nowNanos := timeutil.Now().UnixNano()
	queryRequest := ts.TimeSeriesQueryRequest{
		StartNanos: nowNanos - 10*time.Second.Nanoseconds(),
		EndNanos:   nowNanos,
		Queries: []ts.Query{
			{Name: "doesnt_matter", Sources: []string{}},
		},
	}
	var queryResponse ts.TimeSeriesQueryResponse
	if err := postJSON(cluster.HTTPClient(), c.URL(0), "/ts/query",
		&queryRequest, &queryResponse); err != nil {
		t.Fatal(err)
	}

	// TODO(cdo): When we're able to issue SQL queries without a quorum, test all
	// admin endpoints that issue SQL queries here.
}
Example 14: testAdminLossOfQuorumInner
func testAdminLossOfQuorumInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	if c.NumNodes() < 2 {
		t.Logf("skipping test %s because given cluster has too few nodes", cfg.Name)
		return
	}

	// Get the ids for each node.
	nodeIDs := make([]roachpb.NodeID, c.NumNodes())
	for i := 0; i < c.NumNodes(); i++ {
		var details serverpb.DetailsResponse
		if err := util.GetJSON(cluster.HTTPClient, c.URL(i)+"/_status/details/local", &details); err != nil {
			t.Fatal(err)
		}
		nodeIDs[i] = details.NodeID
	}

	// Leave only the first node alive.
	for i := 1; i < c.NumNodes(); i++ {
		if err := c.Kill(i); err != nil {
			t.Fatal(err)
		}
	}

	// Retrieve node statuses.
	var nodes serverpb.NodesResponse
	if err := util.GetJSON(cluster.HTTPClient, c.URL(0)+"/_status/nodes", &nodes); err != nil {
		t.Fatal(err)
	}
	for _, nodeID := range nodeIDs {
		var nodeStatus status.NodeStatus
		if err := util.GetJSON(cluster.HTTPClient, c.URL(0)+"/_status/nodes/"+strconv.Itoa(int(nodeID)), &nodeStatus); err != nil {
			t.Fatal(err)
		}
	}

	// Retrieve time-series data.
	nowNanos := timeutil.Now().UnixNano()
	queryRequest := tspb.TimeSeriesQueryRequest{
		StartNanos: nowNanos - 10*time.Second.Nanoseconds(),
		EndNanos:   nowNanos,
		Queries: []tspb.Query{
			{Name: "doesnt_matter", Sources: []string{}},
		},
	}
	var queryResponse tspb.TimeSeriesQueryResponse
	if err := util.PostJSON(cluster.HTTPClient, c.URL(0)+"/ts/query",
		&queryRequest, &queryResponse); err != nil {
		t.Fatal(err)
	}

	// TODO(cdo): When we're able to issue SQL queries without a quorum, test all
	// admin endpoints that issue SQL queries here.
}
Example 15: BidirectionalPartitionNemesis
// BidirectionalPartitionNemesis is a nemesis which randomly severs the network
// symmetrically between two random groups of nodes. Partitioned and connected
// mode take alternating turns, with random durations of up to 15s.
func BidirectionalPartitionNemesis(t *testing.T, stop <-chan struct{}, c cluster.Cluster) {
	randSec := func() time.Duration { return time.Duration(rand.Int63n(15 * int64(time.Second))) }
	for {
		ch := make(chan struct{})
		go func() {
			select {
			case <-time.After(randSec()):
			case <-stop:
			}
			close(ch)
		}()
		cutNetwork(t, c, ch, randomBidirectionalPartition(c.NumNodes())...)

		select {
		case <-stop:
			return
		case <-time.After(randSec()):
		}
	}
}