This article collects typical usage examples of the Golang Cluster.Kill method from github.com/cockroachdb/cockroach/acceptance/cluster. If you are wondering what Cluster.Kill does, how to call it, or where to find real-world examples of it, the curated code samples below may help. You can also explore further examples for the containing type, github.com/cockroachdb/cockroach/acceptance/cluster.Cluster.
Six code examples of Cluster.Kill are shown below, sorted by popularity by default.
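All six examples share the same failure-injection pattern: ask the acceptance cluster how many nodes it has, call Kill on some subset of them by index, and in most cases bring them back later with Restart. The following is a minimal sketch of that pattern; the helper name killAllButFirst and the choice to spare node 0 are illustrative assumptions, while the cluster.Cluster methods used (NumNodes, Kill) are exactly those exercised in the examples below. It assumes the same imports as the surrounding test code (testing and the acceptance cluster package).

// killAllButFirst is a hypothetical helper mirroring the pattern used in the
// examples below: it kills every node except node 0 and stops the test on the
// first error.
func killAllButFirst(t *testing.T, c cluster.Cluster) {
	for i := 1; i < c.NumNodes(); i++ {
		if err := c.Kill(i); err != nil {
			t.Fatal(err)
		}
	}
}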
Example 1: testAdminLossOfQuorumInner
func testAdminLossOfQuorumInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	if c.NumNodes() < 2 {
		t.Logf("skipping test %s because given cluster has too few nodes", cfg.Name)
		return
	}

	// Get the ids for each node.
	nodeIDs := make([]roachpb.NodeID, c.NumNodes())
	for i := 0; i < c.NumNodes(); i++ {
		var details serverpb.DetailsResponse
		if err := util.GetJSON(cluster.HTTPClient, c.URL(i)+"/_status/details/local", &details); err != nil {
			t.Fatal(err)
		}
		nodeIDs[i] = details.NodeID
	}

	// Leave only the first node alive.
	for i := 1; i < c.NumNodes(); i++ {
		if err := c.Kill(i); err != nil {
			t.Fatal(err)
		}
	}

	// Retrieve node statuses.
	var nodes serverpb.NodesResponse
	if err := util.GetJSON(cluster.HTTPClient, c.URL(0)+"/_status/nodes", &nodes); err != nil {
		t.Fatal(err)
	}

	for _, nodeID := range nodeIDs {
		var nodeStatus status.NodeStatus
		if err := util.GetJSON(cluster.HTTPClient, c.URL(0)+"/_status/nodes/"+strconv.Itoa(int(nodeID)), &nodeStatus); err != nil {
			t.Fatal(err)
		}
	}

	// Retrieve time-series data.
	nowNanos := timeutil.Now().UnixNano()
	queryRequest := tspb.TimeSeriesQueryRequest{
		StartNanos: nowNanos - 10*time.Second.Nanoseconds(),
		EndNanos:   nowNanos,
		Queries: []tspb.Query{
			{Name: "doesnt_matter", Sources: []string{}},
		},
	}
	var queryResponse tspb.TimeSeriesQueryResponse
	if err := util.PostJSON(cluster.HTTPClient, c.URL(0)+"/ts/query",
		&queryRequest, &queryResponse); err != nil {
		t.Fatal(err)
	}

	// TODO(cdo): When we're able to issue SQL queries without a quorum, test all
	// admin endpoints that issue SQL queries here.
}
Example 2: testAdminLossOfQuorumInner
func testAdminLossOfQuorumInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	if c.NumNodes() < 2 {
		t.Logf("skipping test %s because given cluster has too few nodes", cfg.Name)
		return
	}

	// Get the ids for each node.
	idMap := make(map[int]string)
	for i := 0; i < c.NumNodes(); i++ {
		var detail details
		if err := getJSON(c.URL(i), "/_status/details/local", &detail); err != nil {
			t.Fatal(err)
		}
		idMap[i] = detail.NodeID.String()
	}

	// Leave only the first node alive.
	for i := 1; i < c.NumNodes(); i++ {
		if err := c.Kill(i); err != nil {
			t.Fatal(err)
		}
	}

	// Retrieve node statuses.
	var nodeStatuses interface{}
	if err := getJSON(c.URL(0), "/_status/nodes/", &nodeStatuses); err != nil {
		t.Fatal(err)
	}

	for i := 0; i < c.NumNodes(); i++ {
		var nodeStatus interface{}
		url := fmt.Sprintf("/_status/nodes/%s", idMap[i])
		if err := getJSON(c.URL(0), url, &nodeStatus); err != nil {
			t.Fatal(err)
		}
	}

	// Retrieve time-series data.
	nowNanos := timeutil.Now().UnixNano()
	queryRequest := ts.TimeSeriesQueryRequest{
		StartNanos: nowNanos - 10*time.Second.Nanoseconds(),
		EndNanos:   nowNanos,
		Queries: []ts.Query{
			{Name: "doesnt_matter", Sources: []string{}},
		},
	}
	var queryResponse ts.TimeSeriesQueryResponse
	if err := postJSON(cluster.HTTPClient(), c.URL(0), "/ts/query",
		&queryRequest, &queryResponse); err != nil {
		t.Fatal(err)
	}

	// TODO(cdo): When we're able to issue SQL queries without a quorum, test all
	// admin endpoints that issue SQL queries here.
}
Example 3: testGossipRestartInner
func testGossipRestartInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	// This already replicates the first range (in the local setup).
	// The replication of the first range is important: as long as the
	// first range only exists on one node, that node can trivially
	// acquire the range lease. Once the range is replicated, however,
	// nodes must be able to discover each other over gossip before the
	// lease can be acquired.
	num := c.NumNodes()

	deadline := timeutil.Now().Add(cfg.Duration)
	waitTime := longWaitTime
	if cfg.Duration < waitTime {
		waitTime = shortWaitTime
	}

	for timeutil.Now().Before(deadline) {
		log.Infof(context.Background(), "waiting for initial gossip connections")
		checkGossip(t, c, waitTime, hasPeers(num))
		checkGossip(t, c, waitTime, hasClusterID)
		checkGossip(t, c, waitTime, hasSentinel)

		log.Infof(context.Background(), "killing all nodes")
		for i := 0; i < num; i++ {
			if err := c.Kill(i); err != nil {
				t.Fatal(err)
			}
		}

		log.Infof(context.Background(), "restarting all nodes")
		for i := 0; i < num; i++ {
			if err := c.Restart(i); err != nil {
				t.Fatal(err)
			}
		}

		log.Infof(context.Background(), "waiting for gossip to be connected")
		checkGossip(t, c, waitTime, hasPeers(num))
		checkGossip(t, c, waitTime, hasClusterID)
		checkGossip(t, c, waitTime, hasSentinel)

		for i := 0; i < num; i++ {
			db, dbStopper := c.NewClient(t, i)
			if i == 0 {
				if err := db.Del("count"); err != nil {
					t.Fatal(err)
				}
			}
			var kv client.KeyValue
			if err := db.Txn(func(txn *client.Txn) error {
				var err error
				kv, err = txn.Inc("count", 1)
				return err
			}); err != nil {
				t.Fatal(err)
			} else if v := kv.ValueInt(); v != int64(i+1) {
				t.Fatalf("unexpected value %d for write #%d (expected %d)", v, i, i+1)
			}
			dbStopper.Stop()
		}
	}
}
Example 4: chaosMonkey
// chaosMonkey picks a set of nodes and restarts them. If stopClients is set
// all the clients are locked before the nodes are restarted.
func chaosMonkey(state *testState, c cluster.Cluster, stopClients bool, pickNodes func() []int) {
	defer close(state.teardown)
	for curRound := uint64(1); !state.done(); curRound++ {
		atomic.StoreUint64(&state.monkeyIteration, curRound)
		select {
		case <-stopper:
			return
		default:
		}

		// Pick nodes to be restarted.
		nodes := pickNodes()

		if stopClients {
			// Prevent all clients from writing while nodes are being restarted.
			for i := 0; i < len(state.clients); i++ {
				state.clients[i].Lock()
			}
		}
		log.Infof("round %d: restarting nodes %v", curRound, nodes)
		for _, i := range nodes {
			// Two early exit conditions.
			select {
			case <-stopper:
				break
			default:
			}
			if state.done() {
				break
			}
			log.Infof("round %d: restarting %d", curRound, i)
			if err := c.Kill(i); err != nil {
				state.t.Error(err)
			}
			if err := c.Restart(i); err != nil {
				state.t.Error(err)
			}
			if stopClients {
				// Reinitialize the client talking to the restarted node.
				state.initClient(state.t, c, i)
			}
		}
		if stopClients {
			for i := 0; i < len(state.clients); i++ {
				state.clients[i].Unlock()
			}
		}

		preCount := state.counts()
		madeProgress := func() bool {
			c.Assert(state.t)
			newCounts := state.counts()
			for i := range newCounts {
				if newCounts[i] > preCount[i] {
					return true
				}
			}
			return false
		}

		// Sleep until at least one client is writing successfully.
		log.Warningf("round %d: monkey sleeping while cluster recovers...", curRound)
		for !state.done() && !madeProgress() {
			time.Sleep(time.Second)
		}
		log.Warningf("round %d: cluster recovered", curRound)
	}
}
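The pickNodes callback passed to chaosMonkey decides which node indices get restarted in each round. Below is a minimal sketch of one possible callback; the random-subset policy and the name randomNodes are illustrative assumptions (the original tests define their own pickers), and it assumes math/rand is imported alongside the packages already used above.

// randomNodes returns a pickNodes-style callback that, on each call, selects a
// random non-empty subset of the cluster's node indices for chaosMonkey to
// restart. This policy is illustrative, not taken from the original tests.
func randomNodes(c cluster.Cluster) func() []int {
	return func() []int {
		var picked []int
		for i := 0; i < c.NumNodes(); i++ {
			if rand.Intn(2) == 0 {
				picked = append(picked, i)
			}
		}
		// Always restart at least one node so a round is never a no-op.
		if len(picked) == 0 {
			picked = append(picked, rand.Intn(c.NumNodes()))
		}
		return picked
	}
}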
Example 5: testEventLogInner
func testEventLogInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	num := c.NumNodes()
	if num <= 0 {
		t.Fatalf("%d nodes in cluster", num)
	}

	var confirmedClusterID uuid.UUID
	type nodeEventInfo struct {
		Descriptor roachpb.NodeDescriptor
		ClusterID  uuid.UUID
	}

	// Verify that a node_join message was logged for each node in the cluster.
	// We expect there to eventually be one such message for each node in the
	// cluster, and each message must be correctly formatted.
	util.SucceedsSoon(t, func() error {
		db := makePGClient(t, c.PGUrl(0))
		defer db.Close()

		// Query all node join events. There should be one for each node in the
		// cluster.
		rows, err := db.Query(
			"SELECT targetID, info FROM system.eventlog WHERE eventType = $1",
			string(csql.EventLogNodeJoin))
		if err != nil {
			return err
		}

		seenIds := make(map[int64]struct{})
		var clusterID uuid.UUID
		for rows.Next() {
			var targetID int64
			var infoStr gosql.NullString
			if err := rows.Scan(&targetID, &infoStr); err != nil {
				t.Fatal(err)
			}

			// Verify the stored node descriptor.
			if !infoStr.Valid {
				t.Fatalf("info not recorded for node join, target node %d", targetID)
			}
			var info nodeEventInfo
			if err := json.Unmarshal([]byte(infoStr.String), &info); err != nil {
				t.Fatal(err)
			}
			if a, e := int64(info.Descriptor.NodeID), targetID; a != e {
				t.Fatalf("Node join with targetID %d had descriptor for wrong node %d", e, a)
			}

			// Verify cluster ID is recorded, and is the same for all nodes.
			if uuid.Equal(info.ClusterID, *uuid.EmptyUUID) {
				t.Fatalf("Node join recorded nil cluster id, info: %v", info)
			}
			if uuid.Equal(clusterID, *uuid.EmptyUUID) {
				clusterID = info.ClusterID
			} else if !uuid.Equal(clusterID, info.ClusterID) {
				t.Fatalf(
					"Node join recorded different cluster ID than earlier node. Expected %s, got %s. Info: %v",
					clusterID, info.ClusterID, info)
			}

			// Verify that all NodeIDs are different.
			if _, ok := seenIds[targetID]; ok {
				t.Fatalf("Node ID %d seen in two different node join messages", targetID)
			}
			seenIds[targetID] = struct{}{}
		}
		if err := rows.Err(); err != nil {
			return err
		}

		if a, e := len(seenIds), c.NumNodes(); a != e {
			return errors.Errorf("expected %d node join messages, found %d: %v", e, a, seenIds)
		}

		confirmedClusterID = clusterID
		return nil
	})

	// Stop and Start Node 0, and verify the node restart message.
	if err := c.Kill(0); err != nil {
		t.Fatal(err)
	}
	if err := c.Restart(0); err != nil {
		t.Fatal(err)
	}

	util.SucceedsSoon(t, func() error {
		db := makePGClient(t, c.PGUrl(0))
		defer db.Close()

		// Query all node restart events. There should only be one.
		rows, err := db.Query(
			"SELECT targetID, info FROM system.eventlog WHERE eventType = $1",
			string(csql.EventLogNodeRestart))
		if err != nil {
			return err
		}

		seenCount := 0
		for rows.Next() {
//......... part of the code omitted here .........
Example 6: testRaftUpdateInner
func testRaftUpdateInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	minAffected := int64(server.ExpectedInitialRangeCount())

	mustPost := func(freeze bool) server.ClusterFreezeResponse {
		reply, err := postFreeze(c, freeze)
		if err != nil {
			t.Fatal(util.ErrorfSkipFrames(1, "%v", err))
		}
		return reply
	}

	if reply := mustPost(false); reply.RangesAffected != 0 {
		t.Fatalf("expected initial unfreeze to affect no ranges, got %d", reply.RangesAffected)
	}

	if reply := mustPost(true); reply.RangesAffected < minAffected {
		t.Fatalf("expected >=%d frozen ranges, got %d", minAffected, reply.RangesAffected)
	}

	if reply := mustPost(true); reply.RangesAffected != 0 {
		t.Fatalf("expected second freeze to affect no ranges, got %d", reply.RangesAffected)
	}

	if reply := mustPost(false); reply.RangesAffected < minAffected {
		t.Fatalf("expected >=%d thawed ranges, got %d", minAffected, reply.RangesAffected)
	}

	num := c.NumNodes()
	if num < 3 {
		t.Skip("skipping remainder of test; needs at least 3 nodes")
	}

	// Kill the last node.
	if err := c.Kill(num - 1); err != nil {
		t.Fatal(err)
	}

	// Attempt to freeze should get stuck (since it does not get confirmation
	// of the last node receiving the freeze command).
	if reply, err := postFreeze(c, true); !testutils.IsError(err, "timed out waiting for Range|Timeout exceeded while") {
		t.Fatalf("expected timeout, got %v: %v", err, reply)
	}
	// Shut down the remaining nodes and restart them.
	for i := 0; i < num-1; i++ {
		if err := c.Kill(i); err != nil {
			t.Fatal(err)
		}
	}
	for i := 0; i < num; i++ {
		if err := c.Restart(i); err != nil {
			t.Fatal(err)
		}
	}

	// The cluster should now be fully operational (at least after waiting
	// a little bit) since each node tries to unfreeze everything when it
	// starts.
	if err := util.RetryForDuration(time.Minute, func() error {
		// TODO(tschottdorf): moving the client creation outside of the retry
		// loop will break the test with the following message:
		//
		//   client/rpc_sender.go:61: roachpb.Batch RPC failed as client
		//   connection was closed
		//
		// Perhaps the cluster updates the address too late after restarting
		// the node.
		db, dbStopper := c.NewClient(t, 0)
		defer dbStopper.Stop()

		_, err := db.Scan(keys.LocalMax, roachpb.KeyMax, 0)
		if err != nil {
			log.Info(err)
		}
		return err
	}); err != nil {
		t.Fatal(err)
	}

	// Unfreezing again should be a no-op.
	if reply, err := postFreeze(c, false); err != nil {
		t.Fatal(err)
	} else if reply.RangesAffected > 0 {
		t.Fatalf("still %d frozen ranges", reply.RangesAffected)
	}
}