This page collects typical usage examples of the Golang function WaitForCluster from github.com/docker/swarmkit/manager/state/raft/testutils. If you are wondering what WaitForCluster does, how to call it, or want to see it used in context, the curated examples below should help.
The following shows 15 code examples of the WaitForCluster function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Golang code examples.
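Before the numbered examples, here is a minimal sketch of the typical call pattern: bring up a cluster driven by a fake clock, block in WaitForCluster until the memberlist converges and a leader is elected, then propose and verify a value. It assumes the package-level test CA tc used throughout the examples below and uses only helpers that appear in them; treat it as an illustrative sketch, not a verbatim swarmkit test.
func TestWaitForClusterSketch(t *testing.T) {
	t.Parallel()
	// Start a 3-node cluster; clockSource is the fake clock the helpers advance.
	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	defer raftutils.TeardownCluster(t, nodes)
	// Block until every node sees the full memberlist and a leader is elected.
	raftutils.WaitForCluster(t, clockSource, nodes)
	// Once the cluster is ready, a proposal to the leader replicates everywhere.
	value, err := raftutils.ProposeValue(t, raftutils.Leader(nodes))
	assert.NoError(t, err, "failed to propose value")
	for _, node := range nodes {
		raftutils.CheckValue(t, clockSource, node, value)
	}
}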
Example 1: TestRaftQuorumRecovery
func TestRaftQuorumRecovery(t *testing.T) {
t.Parallel()
// Bring up a 5-node cluster
nodes, clockSource := raftutils.NewRaftCluster(t, tc)
raftutils.AddRaftNode(t, clockSource, nodes, tc)
raftutils.AddRaftNode(t, clockSource, nodes, tc)
defer raftutils.TeardownCluster(t, nodes)
// Lose a majority
for i := uint64(1); i <= 3; i++ {
nodes[i].Server.Stop()
nodes[i].Shutdown()
}
raftutils.AdvanceTicks(clockSource, 5)
// Restore the majority by restarting node 3 (3 of the 5 members are then up)
nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)
delete(nodes, 1)
delete(nodes, 2)
raftutils.WaitForCluster(t, clockSource, nodes)
// Propose a value
value, err := raftutils.ProposeValue(t, raftutils.Leader(nodes))
assert.NoError(t, err)
for _, node := range nodes {
raftutils.CheckValue(t, clockSource, node, value)
}
}
Example 2: testRaftRestartCluster
func testRaftRestartCluster(t *testing.T, stagger bool) {
nodes, clockSource := raftutils.NewRaftCluster(t, tc)
defer raftutils.TeardownCluster(t, nodes)
// Propose a value
values := make([]*api.Node, 2)
var err error
values[0], err = raftutils.ProposeValue(t, nodes[1], "id1")
assert.NoError(t, err, "failed to propose value")
// Stop all nodes
for _, node := range nodes {
node.Server.Stop()
node.Shutdown()
}
raftutils.AdvanceTicks(clockSource, 5)
// Restart all nodes
i := 0
for k, node := range nodes {
if stagger && i != 0 {
raftutils.AdvanceTicks(clockSource, 1)
}
nodes[k] = raftutils.RestartNode(t, clockSource, node, false)
i++
}
raftutils.WaitForCluster(t, clockSource, nodes)
// Propose another value
values[1], err = raftutils.ProposeValue(t, raftutils.Leader(nodes), "id2")
assert.NoError(t, err, "failed to propose value")
for _, node := range nodes {
assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
var err error
node.MemoryStore().View(func(tx store.ReadTx) {
var allNodes []*api.Node
allNodes, err = store.FindNodes(tx, store.All)
if err != nil {
return
}
if len(allNodes) != 2 {
err = fmt.Errorf("expected 2 nodes, got %d", len(allNodes))
return
}
for i, nodeID := range []string{"id1", "id2"} {
n := store.GetNode(tx, nodeID)
if !reflect.DeepEqual(n, values[i]) {
err = fmt.Errorf("node %s did not match expected value", nodeID)
return
}
}
})
return err
}))
}
}
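testRaftRestartCluster above is a helper rather than a test: the stagger flag decides whether the nodes restart on the same fake-clock tick or one tick apart. It is presumably driven by thin wrappers along these lines (the wrapper names are hypothetical, not confirmed by this page):
func TestRaftRestartClusterSimultaneously(t *testing.T) {
	t.Parallel()
	// Restart every node on the same fake-clock tick.
	testRaftRestartCluster(t, false)
}
func TestRaftRestartClusterStaggered(t *testing.T) {
	t.Parallel()
	// Advance the clock one tick between restarts.
	testRaftRestartCluster(t, true)
}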
Example 3: TestRaftEncryptionKeyRotationStress
// This test rotates the encryption key and restarts the node - the intent is to try to trigger
// race conditions when there is more than one node, since consensus may then take longer.
func TestRaftEncryptionKeyRotationStress(t *testing.T) {
t.Parallel()
// Bring up a 3-node cluster
nodes, clockSource := raftutils.NewRaftCluster(t, tc)
defer raftutils.TeardownCluster(t, nodes)
leader := nodes[1]
// constantly propose values
done, stop, restart, clusterReady := make(chan struct{}), make(chan struct{}), make(chan struct{}), make(chan struct{})
go func() {
counter := len(nodes)
for {
select {
case <-stop:
close(done)
return
case <-restart:
// the node restarts may trigger a leadership change, so wait until the cluster has 3
// nodes again and a leader is selected before proposing more values
<-clusterReady
leader = raftutils.Leader(nodes)
default:
counter++
raftutils.ProposeValue(t, leader, DefaultProposalTime, fmt.Sprintf("id%d", counter))
}
}
}()
for i := 0; i < 30; i++ {
// rotate the encryption key
nodes[3].KeyRotator.QueuePendingKey([]byte(fmt.Sprintf("newKey%d", i)))
nodes[3].KeyRotator.RotationNotify() <- struct{}{}
require.NoError(t, raftutils.PollFunc(clockSource, func() error {
if nodes[3].KeyRotator.GetKeys().PendingDEK == nil {
return nil
}
return fmt.Errorf("not done rotating yet")
}))
// restart the node and wait for everything to settle and a leader to be elected
nodes[3].Server.Stop()
nodes[3].ShutdownRaft()
restart <- struct{}{}
nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)
raftutils.AdvanceTicks(clockSource, 1)
raftutils.WaitForCluster(t, clockSource, nodes)
clusterReady <- struct{}{}
}
close(stop)
<-done
}
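The rotation handshake inside the loop above is worth isolating: queue a pending DEK, wake the rotator, then poll until the pending key disappears, which means a snapshot was written with the new key. The helper below is a hypothetical refactoring of exactly that handshake, using only calls that appear in the example:
func rotateKeyAndWait(t *testing.T, clockSource *fakeclock.FakeClock, node *raftutils.TestNode, key []byte) {
	// Queue the new data-encryption key and wake the rotator.
	node.KeyRotator.QueuePendingKey(key)
	node.KeyRotator.RotationNotify() <- struct{}{}
	// Rotation is done once the pending DEK has been promoted; PollFunc
	// keeps driving the fake clock while the condition still fails.
	require.NoError(t, raftutils.PollFunc(clockSource, func() error {
		if node.KeyRotator.GetKeys().PendingDEK == nil {
			return nil
		}
		return fmt.Errorf("not done rotating yet")
	}))
}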
Example 4: TestCanRemoveMember
func TestCanRemoveMember(t *testing.T) {
nodes, clockSource := raftutils.NewRaftCluster(t, tc)
defer raftutils.TeardownCluster(t, nodes)
// Stop node 2 and node 3 (2 nodes out of 3)
nodes[2].Server.Stop()
nodes[2].Shutdown()
nodes[3].Server.Stop()
nodes[3].Shutdown()
// Node 2 and Node 3 should be listed as Unreachable
assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
members := nodes[1].GetMemberlist()
if len(members) != 3 {
return fmt.Errorf("expected 3 nodes, got %d", len(members))
}
if members[nodes[2].Config.ID].Status.Reachability == api.RaftMemberStatus_REACHABLE {
return fmt.Errorf("expected node 2 to be unreachable")
}
if members[nodes[3].Config.ID].Status.Reachability == api.RaftMemberStatus_REACHABLE {
return fmt.Errorf("expected node 3 to be unreachable")
}
return nil
}))
// Removing node 3 should fail
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
err := nodes[1].RemoveMember(ctx, 3)
assert.Error(t, err)
assert.Equal(t, err, raft.ErrCannotRemoveMember)
members := nodes[1].GetMemberlist()
assert.Equal(t, len(members), 3)
// Restart node 2 and node 3
nodes[2] = raftutils.RestartNode(t, clockSource, nodes[2], false)
nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)
raftutils.WaitForCluster(t, clockSource, nodes)
// Removing node 3 should succeed
ctx, _ = context.WithTimeout(context.Background(), 10*time.Second)
err = nodes[1].RemoveMember(ctx, nodes[3].Config.ID)
assert.NoError(t, err)
members = nodes[1].GetMemberlist()
assert.Nil(t, members[nodes[3].Config.ID])
assert.Equal(t, len(members), 2)
// Removing node 2 should fail
ctx, _ = context.WithTimeout(context.Background(), 10*time.Second)
err = nodes[1].RemoveMember(ctx, nodes[2].Config.ID)
assert.Error(t, err)
assert.Equal(t, err, raft.ErrCannotRemoveMember)
assert.Equal(t, len(members), 2)
}
Example 5: TestRaftLeaderDown
func TestRaftLeaderDown(t *testing.T) {
t.Parallel()
nodes, clockSource := raftutils.NewRaftCluster(t, tc)
defer raftutils.TeardownCluster(t, nodes)
// Stop node 1
nodes[1].Stop()
newCluster := map[uint64]*raftutils.TestNode{
2: nodes[2],
3: nodes[3],
}
// Wait for the re-election to occur
raftutils.WaitForCluster(t, clockSource, newCluster)
// Leader should not be 1
assert.NotEqual(t, nodes[2].Leader(), nodes[1].Config.ID)
// Ensure that node 2 and node 3 have the same leader
assert.Equal(t, nodes[3].Leader(), nodes[2].Leader())
// Find the leader node and a follower node
var (
leaderNode *raftutils.TestNode
followerNode *raftutils.TestNode
)
for i, n := range newCluster {
if n.Config.ID == n.Leader() {
leaderNode = n
if i == 2 {
followerNode = newCluster[3]
} else {
followerNode = newCluster[2]
}
}
}
require.NotNil(t, leaderNode)
require.NotNil(t, followerNode)
// Propose a value
value, err := raftutils.ProposeValue(t, leaderNode)
assert.NoError(t, err, "failed to propose value")
// The value should be replicated on all remaining nodes
raftutils.CheckValue(t, clockSource, leaderNode, value)
assert.Equal(t, len(leaderNode.GetMemberlist()), 3)
raftutils.CheckValue(t, clockSource, followerNode, value)
assert.Equal(t, len(followerNode.GetMemberlist()), 3)
}
Example 6: TestRaftUnreachableNode
func TestRaftUnreachableNode(t *testing.T) {
t.Parallel()
nodes := make(map[uint64]*raftutils.TestNode)
var clockSource *fakeclock.FakeClock
nodes[1], clockSource = raftutils.NewInitNode(t, tc, nil)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Add a new node
nodes[2] = raftutils.NewNode(t, clockSource, tc, raft.NodeOptions{JoinAddr: nodes[1].Address})
err := nodes[2].JoinAndStart(ctx)
require.NoError(t, err, "can't join cluster")
go nodes[2].Run(ctx)
// Deliberately stop the second node's Raft server after it joins
nodes[2].Server.Stop()
nodes[2].Listener.Close()
raftutils.AdvanceTicks(clockSource, 5)
time.Sleep(100 * time.Millisecond)
wrappedListener := raftutils.RecycleWrappedListener(nodes[2].Listener)
securityConfig := nodes[2].SecurityConfig
serverOpts := []grpc.ServerOption{grpc.Creds(securityConfig.ServerTLSCreds)}
s := grpc.NewServer(serverOpts...)
nodes[2].Server = s
raft.Register(s, nodes[2].Node)
go func() {
// After stopping, we should receive an error from Serve
assert.Error(t, s.Serve(wrappedListener))
}()
raftutils.WaitForCluster(t, clockSource, nodes)
defer raftutils.TeardownCluster(t, nodes)
// Propose a value
value, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime)
assert.NoError(t, err, "failed to propose value")
// All nodes should have the value in the physical store
raftutils.CheckValue(t, clockSource, nodes[1], value)
raftutils.CheckValue(t, clockSource, nodes[2], value)
}
Example 7: TestRaftRejoin
func TestRaftRejoin(t *testing.T) {
t.Parallel()
nodes, clockSource := raftutils.NewRaftCluster(t, tc)
defer raftutils.TeardownCluster(t, nodes)
ids := []string{"id1", "id2"}
// Propose a value
values := make([]*api.Node, 2)
var err error
values[0], err = raftutils.ProposeValue(t, nodes[1], ids[0])
assert.NoError(t, err, "failed to propose value")
// The value should be replicated on node 3
raftutils.CheckValue(t, clockSource, nodes[3], values[0])
assert.Equal(t, len(nodes[3].GetMemberlist()), 3)
// Stop node 3
nodes[3].Server.Stop()
nodes[3].Shutdown()
// Propose another value
values[1], err = raftutils.ProposeValue(t, nodes[1], ids[1])
assert.NoError(t, err, "failed to propose value")
// Nodes 1 and 2 should have the new value
raftutils.CheckValuesOnNodes(t, clockSource, map[uint64]*raftutils.TestNode{1: nodes[1], 2: nodes[2]}, ids, values)
nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)
raftutils.WaitForCluster(t, clockSource, nodes)
// Node 3 should have all values, including the one proposed while
// it was unavailable.
raftutils.CheckValuesOnNodes(t, clockSource, nodes, ids, values)
}
Example 8: TestRaftUnreachableNode
func TestRaftUnreachableNode(t *testing.T) {
t.Parallel()
nodes := make(map[uint64]*raftutils.TestNode)
var clockSource *fakeclock.FakeClock
nodes[1], clockSource = raftutils.NewInitNode(t, tc, nil)
ctx := context.Background()
// Add a new node, but don't start its server yet
n := raftutils.NewNode(t, clockSource, tc, raft.NewNodeOptions{JoinAddr: nodes[1].Address})
go n.Run(ctx)
raftutils.AdvanceTicks(clockSource, 5)
time.Sleep(100 * time.Millisecond)
raft.Register(n.Server, n.Node)
// Now start the new node's server
go func() {
// After stopping, we should receive an error from Serve
assert.Error(t, n.Server.Serve(n.Listener))
}()
nodes[2] = n
raftutils.WaitForCluster(t, clockSource, nodes)
defer raftutils.TeardownCluster(t, nodes)
// Propose a value
value, err := raftutils.ProposeValue(t, nodes[1])
assert.NoError(t, err, "failed to propose value")
// All nodes should have the value in the physical store
raftutils.CheckValue(t, clockSource, nodes[1], value)
raftutils.CheckValue(t, clockSource, nodes[2], value)
}
Example 9: TestRaftLeaderLeave
func TestRaftLeaderLeave(t *testing.T) {
t.Parallel()
nodes, clockSource := raftutils.NewRaftCluster(t, tc)
// node 1 is the leader
assert.Equal(t, nodes[1].Leader(), nodes[1].Config.ID)
// Try to leave the raft
// Use gRPC instead of calling handler directly because of
// authorization check.
client, err := nodes[1].ConnectToMember(nodes[1].Address, 10*time.Second)
assert.NoError(t, err)
defer client.Conn.Close()
raftClient := api.NewRaftMembershipClient(client.Conn)
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
resp, err := raftClient.Leave(ctx, &api.LeaveRequest{Node: &api.RaftMember{RaftID: nodes[1].Config.ID}})
assert.NoError(t, err, "error sending message to leave the raft")
assert.NotNil(t, resp, "leave response message is nil")
newCluster := map[uint64]*raftutils.TestNode{
2: nodes[2],
3: nodes[3],
}
// Wait for the re-election to occur
raftutils.WaitForCluster(t, clockSource, newCluster)
// Leader should not be 1
assert.NotEqual(t, nodes[2].Leader(), nodes[1].Config.ID)
assert.Equal(t, nodes[2].Leader(), nodes[3].Leader())
leader := nodes[2].Leader()
// Find the leader node and a follower node
var (
leaderNode *raftutils.TestNode
followerNode *raftutils.TestNode
)
for i, n := range nodes {
if n.Config.ID == leader {
leaderNode = n
if i == 2 {
followerNode = nodes[3]
} else {
followerNode = nodes[2]
}
}
}
require.NotNil(t, leaderNode)
require.NotNil(t, followerNode)
// Propose a value
value, err := raftutils.ProposeValue(t, leaderNode)
assert.NoError(t, err, "failed to propose value")
// The value should be replicated on all remaining nodes
raftutils.CheckValue(t, clockSource, leaderNode, value)
assert.Equal(t, len(leaderNode.GetMemberlist()), 2)
raftutils.CheckValue(t, clockSource, followerNode, value)
assert.Equal(t, len(followerNode.GetMemberlist()), 2)
raftutils.TeardownCluster(t, newCluster)
}
Example 10: TestRaftEncryptionKeyRotationWait
// This test rotates the encryption key and, at each step, waits for the expected snapshot and rotation behavior
func TestRaftEncryptionKeyRotationWait(t *testing.T) {
t.Parallel()
nodes := make(map[uint64]*raftutils.TestNode)
var clockSource *fakeclock.FakeClock
raftConfig := raft.DefaultRaftConfig()
nodes[1], clockSource = raftutils.NewInitNode(t, tc, &raftConfig)
defer raftutils.TeardownCluster(t, nodes)
nodeIDs := []string{"id1", "id2", "id3"}
values := make([]*api.Node, len(nodeIDs))
// Propose 3 values
var err error
for i, nodeID := range nodeIDs[:3] {
values[i], err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, nodeID)
require.NoError(t, err, "failed to propose value")
}
snapDir := filepath.Join(nodes[1].StateDir, "snap-v3-encrypted")
startingKeys := nodes[1].KeyRotator.GetKeys()
// rotate the encryption key
nodes[1].KeyRotator.QueuePendingKey([]byte("key2"))
nodes[1].KeyRotator.RotationNotify() <- struct{}{}
// the rotation should trigger a snapshot, which should notify the rotator when it's done
require.NoError(t, raftutils.PollFunc(clockSource, func() error {
snapshots, err := storage.ListSnapshots(snapDir)
if err != nil {
return err
}
if len(snapshots) != 1 {
return fmt.Errorf("expected 1 snapshot, found %d on new node", len(snapshots))
}
if nodes[1].KeyRotator.NeedsRotation() {
return fmt.Errorf("rotation never finished")
}
return nil
}))
raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs, values)
// Propose a 4th value
nodeIDs = append(nodeIDs, "id4")
v, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, "id4")
require.NoError(t, err, "failed to propose value")
values = append(values, v)
raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs, values)
nodes[1].Server.Stop()
nodes[1].ShutdownRaft()
// Try to restart node 1. Without the new unlock key, it can't actually start
n, ctx := raftutils.CopyNode(t, clockSource, nodes[1], false, raftutils.NewSimpleKeyRotator(startingKeys))
require.Error(t, n.Node.JoinAndStart(ctx),
"should not have been able to restart since we can't read snapshots")
// with the right key, it can start, even if the right key is only the pending key
newKeys := startingKeys
newKeys.PendingDEK = []byte("key2")
nodes[1].KeyRotator = raftutils.NewSimpleKeyRotator(newKeys)
nodes[1] = raftutils.RestartNode(t, clockSource, nodes[1], false)
raftutils.WaitForCluster(t, clockSource, nodes)
// as soon as we joined, it should have finished rotating the key
require.False(t, nodes[1].KeyRotator.NeedsRotation())
raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs, values)
// break snapshotting, and ensure that key rotation never finishes
tempSnapDir := filepath.Join(nodes[1].StateDir, "snap-backup")
require.NoError(t, os.Rename(snapDir, tempSnapDir))
require.NoError(t, ioutil.WriteFile(snapDir, []byte("this is no longer a directory"), 0644))
nodes[1].KeyRotator.QueuePendingKey([]byte("key3"))
nodes[1].KeyRotator.RotationNotify() <- struct{}{}
time.Sleep(250 * time.Millisecond)
// rotation has not finished, because we cannot take a snapshot
require.True(t, nodes[1].KeyRotator.NeedsRotation())
// Propose a 5th value, so we have WALs written with the new key
nodeIDs = append(nodeIDs, "id5")
v, err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, "id5")
require.NoError(t, err, "failed to propose value")
values = append(values, v)
raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs, values)
nodes[1].Server.Stop()
nodes[1].ShutdownRaft()
// restore the snapshot dir
require.NoError(t, os.RemoveAll(snapDir))
require.NoError(t, os.Rename(tempSnapDir, snapDir))
// Now the WALs are a mix of key2 and key3 - we can't actually start with either key
singleKey := raft.EncryptionKeys{CurrentDEK: []byte("key2")}
//......... remaining code omitted .........
Example 11: testUpdateNodeDemote
func testUpdateNodeDemote(leader bool, t *testing.T) {
tc := cautils.NewTestCA(nil)
defer tc.Stop()
ts := newTestServer(t)
defer ts.Stop()
nodes, clockSource := raftutils.NewRaftCluster(t, tc)
defer raftutils.TeardownCluster(t, nodes)
// Assign one of the raft node to the test server
ts.Server.raft = nodes[1].Node
ts.Server.store = nodes[1].MemoryStore()
// Create a node object for each of the managers
assert.NoError(t, nodes[1].MemoryStore().Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateNode(tx, &api.Node{
ID: nodes[1].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: api.NodeSpec{
Role: api.NodeRoleManager,
Membership: api.NodeMembershipAccepted,
},
}))
assert.NoError(t, store.CreateNode(tx, &api.Node{
ID: nodes[2].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: api.NodeSpec{
Role: api.NodeRoleManager,
Membership: api.NodeMembershipAccepted,
},
}))
assert.NoError(t, store.CreateNode(tx, &api.Node{
ID: nodes[3].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: api.NodeSpec{
Role: api.NodeRoleManager,
Membership: api.NodeMembershipAccepted,
},
}))
return nil
}))
// Stop Node 3 (1 node out of 3)
nodes[3].Server.Stop()
nodes[3].ShutdownRaft()
// Node 3 should be listed as Unreachable
assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
members := nodes[1].GetMemberlist()
if len(members) != 3 {
return fmt.Errorf("expected 3 nodes, got %d", len(members))
}
if members[nodes[3].Config.ID].Status.Reachability == api.RaftMemberStatus_REACHABLE {
return fmt.Errorf("expected node 3 to be unreachable")
}
return nil
}))
// Try to demote Node 2, this should fail because of the quorum safeguard
r, err := ts.Client.GetNode(context.Background(), &api.GetNodeRequest{NodeID: nodes[2].SecurityConfig.ClientTLSCreds.NodeID()})
assert.NoError(t, err)
spec := r.Node.Spec.Copy()
spec.Role = api.NodeRoleWorker
version := &r.Node.Meta.Version
_, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{
NodeID: nodes[2].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: spec,
NodeVersion: version,
})
assert.Error(t, err)
assert.Equal(t, codes.FailedPrecondition, grpc.Code(err))
// Restart Node 3
nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)
raftutils.WaitForCluster(t, clockSource, nodes)
// Node 3 should be listed as Reachable
assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
members := nodes[1].GetMemberlist()
if len(members) != 3 {
return fmt.Errorf("expected 3 nodes, got %d", len(members))
}
if members[nodes[3].Config.ID].Status.Reachability == api.RaftMemberStatus_UNREACHABLE {
return fmt.Errorf("expected node 3 to be reachable")
}
return nil
}))
// Try to demote Node 3, this should succeed
r, err = ts.Client.GetNode(context.Background(), &api.GetNodeRequest{NodeID: nodes[3].SecurityConfig.ClientTLSCreds.NodeID()})
assert.NoError(t, err)
spec = r.Node.Spec.Copy()
spec.Role = api.NodeRoleWorker
version = &r.Node.Meta.Version
_, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{
NodeID: nodes[3].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: spec,
NodeVersion: version,
})
assert.NoError(t, err)
newCluster := map[uint64]*raftutils.TestNode{
1: nodes[1],
//......... remaining code omitted .........
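Like Example 2, testUpdateNodeDemote is a parameterized helper; the leader flag is consumed in the omitted portion (presumably selecting whether the node being demoted currently holds leadership). Hypothetical wrappers in the style of the other tests might look like this (names are illustrative only):
func TestUpdateNodeDemoteFollower(t *testing.T) {
	// Demote a node that is not the current leader.
	testUpdateNodeDemote(false, t)
}
func TestUpdateNodeDemoteLeader(t *testing.T) {
	// Demote the node that currently holds leadership.
	testUpdateNodeDemote(true, t)
}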
Example 12: TestListManagerNodes
func TestListManagerNodes(t *testing.T) {
t.Parallel()
tc := cautils.NewTestCA(nil)
defer tc.Stop()
ts := newTestServer(t)
defer ts.Stop()
nodes, clockSource := raftutils.NewRaftCluster(t, tc)
defer raftutils.TeardownCluster(t, nodes)
// Create a node object for each of the managers
assert.NoError(t, nodes[1].MemoryStore().Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateNode(tx, &api.Node{ID: nodes[1].SecurityConfig.ClientTLSCreds.NodeID()}))
assert.NoError(t, store.CreateNode(tx, &api.Node{ID: nodes[2].SecurityConfig.ClientTLSCreds.NodeID()}))
assert.NoError(t, store.CreateNode(tx, &api.Node{ID: nodes[3].SecurityConfig.ClientTLSCreds.NodeID()}))
return nil
}))
// Assign one of the raft node to the test server
ts.Server.raft = nodes[1].Node
ts.Server.store = nodes[1].MemoryStore()
// There should be 3 reachable managers listed
r, err := ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{})
assert.NoError(t, err)
assert.NotNil(t, r)
managers := getMap(t, r.Nodes)
assert.Len(t, ts.Server.raft.GetMemberlist(), 3)
assert.Len(t, r.Nodes, 3)
// Node 1 should be the leader
for i := 1; i <= 3; i++ {
if i == 1 {
assert.True(t, managers[nodes[uint64(i)].Config.ID].Leader)
continue
}
assert.False(t, managers[nodes[uint64(i)].Config.ID].Leader)
}
// All nodes should be reachable
for i := 1; i <= 3; i++ {
assert.Equal(t, api.RaftMemberStatus_REACHABLE, managers[nodes[uint64(i)].Config.ID].Reachability)
}
// Add two more nodes to the cluster
raftutils.AddRaftNode(t, clockSource, nodes, tc)
raftutils.AddRaftNode(t, clockSource, nodes, tc)
raftutils.WaitForCluster(t, clockSource, nodes)
// Add node entries for these
assert.NoError(t, nodes[1].MemoryStore().Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateNode(tx, &api.Node{ID: nodes[4].SecurityConfig.ClientTLSCreds.NodeID()}))
assert.NoError(t, store.CreateNode(tx, &api.Node{ID: nodes[5].SecurityConfig.ClientTLSCreds.NodeID()}))
return nil
}))
// There should be 5 reachable managers listed
r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{})
assert.NoError(t, err)
assert.NotNil(t, r)
managers = getMap(t, r.Nodes)
assert.Len(t, ts.Server.raft.GetMemberlist(), 5)
assert.Len(t, r.Nodes, 5)
for i := 1; i <= 5; i++ {
assert.Equal(t, api.RaftMemberStatus_REACHABLE, managers[nodes[uint64(i)].Config.ID].Reachability)
}
// Stop 2 nodes
nodes[4].Server.Stop()
nodes[4].ShutdownRaft()
nodes[5].Server.Stop()
nodes[5].ShutdownRaft()
// Node 4 and Node 5 should be listed as Unreachable
assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{})
if err != nil {
return err
}
managers = getMap(t, r.Nodes)
if len(r.Nodes) != 5 {
return fmt.Errorf("expected 5 nodes, got %d", len(r.Nodes))
}
if managers[nodes[4].Config.ID].Reachability == api.RaftMemberStatus_REACHABLE {
return fmt.Errorf("expected node 4 to be unreachable")
}
if managers[nodes[5].Config.ID].Reachability == api.RaftMemberStatus_REACHABLE {
return fmt.Errorf("expected node 5 to be unreachable")
}
return nil
}))
// Restart the 2 nodes
nodes[4] = raftutils.RestartNode(t, clockSource, nodes[4], false)
//......... remaining code omitted .........
Example 13: TestUpdateNodeDemote
func TestUpdateNodeDemote(t *testing.T) {
tc := cautils.NewTestCA(nil, cautils.AcceptancePolicy(true, true, ""))
ts := newTestServer(t)
nodes, clockSource := raftutils.NewRaftCluster(t, tc)
defer raftutils.TeardownCluster(t, nodes)
// Assign one of the raft node to the test server
ts.Server.raft = nodes[1].Node
ts.Server.store = nodes[1].MemoryStore()
// Create a node object for each of the managers
assert.NoError(t, nodes[1].MemoryStore().Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateNode(tx, &api.Node{
ID: nodes[1].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: api.NodeSpec{
Role: api.NodeRoleManager,
Membership: api.NodeMembershipAccepted,
},
}))
assert.NoError(t, store.CreateNode(tx, &api.Node{
ID: nodes[2].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: api.NodeSpec{
Role: api.NodeRoleManager,
Membership: api.NodeMembershipAccepted,
},
}))
assert.NoError(t, store.CreateNode(tx, &api.Node{
ID: nodes[3].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: api.NodeSpec{
Role: api.NodeRoleManager,
Membership: api.NodeMembershipAccepted,
},
}))
return nil
}))
// Stop Node 3 (1 node out of 3)
nodes[3].Server.Stop()
nodes[3].Shutdown()
// Node 3 should be listed as Unreachable
assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
members := nodes[1].GetMemberlist()
if len(members) != 3 {
return fmt.Errorf("expected 3 nodes, got %d", len(members))
}
if members[nodes[3].Config.ID].Status.Reachability == api.RaftMemberStatus_REACHABLE {
return fmt.Errorf("expected node 3 to be unreachable")
}
return nil
}))
// Try to demote Node 2, this should fail because of the quorum safeguard
r, err := ts.Client.GetNode(context.Background(), &api.GetNodeRequest{NodeID: nodes[2].SecurityConfig.ClientTLSCreds.NodeID()})
assert.NoError(t, err)
spec := r.Node.Spec.Copy()
spec.Role = api.NodeRoleWorker
version := &r.Node.Meta.Version
_, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{
NodeID: nodes[2].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: spec,
NodeVersion: version,
})
assert.Error(t, err)
assert.Equal(t, codes.FailedPrecondition, grpc.Code(err))
// Restart Node 3
nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)
raftutils.WaitForCluster(t, clockSource, nodes)
// Node 3 should be listed as Reachable
assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
members := nodes[1].GetMemberlist()
if len(members) != 3 {
return fmt.Errorf("expected 3 nodes, got %d", len(members))
}
if members[nodes[3].Config.ID].Status.Reachability == api.RaftMemberStatus_UNREACHABLE {
return fmt.Errorf("expected node 3 to be reachable")
}
return nil
}))
// Try to demote Node 3, this should succeed
r, err = ts.Client.GetNode(context.Background(), &api.GetNodeRequest{NodeID: nodes[3].SecurityConfig.ClientTLSCreds.NodeID()})
assert.NoError(t, err)
spec = r.Node.Spec.Copy()
spec.Role = api.NodeRoleWorker
version = &r.Node.Meta.Version
_, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{
NodeID: nodes[3].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: spec,
NodeVersion: version,
})
assert.NoError(t, err)
newCluster := map[uint64]*raftutils.TestNode{
1: nodes[1],
2: nodes[2],
}
//......... remaining code omitted .........
Example 14: TestRaftForceNewCluster
func TestRaftForceNewCluster(t *testing.T) {
t.Parallel()
nodes, clockSource := raftutils.NewRaftCluster(t, tc)
// Propose a value
values := make([]*api.Node, 2)
var err error
values[0], err = raftutils.ProposeValue(t, nodes[1], "id1")
assert.NoError(t, err, "failed to propose value")
// The memberlist should contain 3 members on each node
for i := 1; i <= 3; i++ {
assert.Equal(t, len(nodes[uint64(i)].GetMemberlist()), 3)
}
// Stop all nodes
for _, node := range nodes {
node.Server.Stop()
node.Shutdown()
}
raftutils.AdvanceTicks(clockSource, 5)
toClean := map[uint64]*raftutils.TestNode{
2: nodes[2],
3: nodes[3],
}
raftutils.TeardownCluster(t, toClean)
delete(nodes, 2)
delete(nodes, 3)
// Only restart the first node with force-new-cluster option
nodes[1] = raftutils.RestartNode(t, clockSource, nodes[1], true)
raftutils.WaitForCluster(t, clockSource, nodes)
// The memberlist should contain only one node (self)
assert.Equal(t, len(nodes[1].GetMemberlist()), 1)
// Add 2 more members
nodes[2] = raftutils.NewJoinNode(t, clockSource, nodes[1].Address, tc)
raftutils.WaitForCluster(t, clockSource, nodes)
nodes[3] = raftutils.NewJoinNode(t, clockSource, nodes[1].Address, tc)
raftutils.WaitForCluster(t, clockSource, nodes)
newCluster := map[uint64]*raftutils.TestNode{
1: nodes[1],
2: nodes[2],
3: nodes[3],
}
defer raftutils.TeardownCluster(t, newCluster)
// The memberlist should contain 3 members on each node
for i := 1; i <= 3; i++ {
assert.Equal(t, len(nodes[uint64(i)].GetMemberlist()), 3)
}
// Propose another value
values[1], err = raftutils.ProposeValue(t, raftutils.Leader(nodes), "id2")
assert.NoError(t, err, "failed to propose value")
for _, node := range nodes {
assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
var err error
node.MemoryStore().View(func(tx store.ReadTx) {
var allNodes []*api.Node
allNodes, err = store.FindNodes(tx, store.All)
if err != nil {
return
}
if len(allNodes) != 2 {
err = fmt.Errorf("expected 2 nodes, got %d", len(allNodes))
return
}
for i, nodeID := range []string{"id1", "id2"} {
n := store.GetNode(tx, nodeID)
if !reflect.DeepEqual(n, values[i]) {
err = fmt.Errorf("node %s did not match expected value", nodeID)
return
}
}
})
return err
}))
}
}
Example 15: TestStress
func TestStress(t *testing.T) {
t.Parallel()
// Bring up a 5-node cluster
nodes, clockSource := raftutils.NewRaftCluster(t, tc)
raftutils.AddRaftNode(t, clockSource, nodes, tc)
raftutils.AddRaftNode(t, clockSource, nodes, tc)
defer raftutils.TeardownCluster(t, nodes)
// number of nodes that are running
nup := len(nodes)
// record of nodes that are down
idleNodes := map[int]struct{}{}
// record of IDs that were successfully proposed or timed out
pIDs := []string{}
leader := -1
for iters := 0; iters < 1000; iters++ {
// keep proposing new values and killing the leader
for i := 1; i <= 5; i++ {
if nodes[uint64(i)] != nil {
id := strconv.Itoa(iters)
_, err := raftutils.ProposeValue(t, nodes[uint64(i)], id)
if err == nil {
pIDs = append(pIDs, id)
// a successful proposal implies at least 3 nodes are running
assert.True(t, nup >= 3)
// only the leader can successfully propose a value
assert.True(t, leader == i || leader == -1)
// update leader
leader = i
break
} else if strings.Contains(err.Error(), "context deadline exceeded") {
// even though the proposal timed out, record the value anyway:
// it may still be committed to Raft later
pIDs = append(pIDs, id)
}
}
}
if rand.Intn(100) < 10 {
// advance the clock so any pending election finishes quickly
clockSource.Increment(200 * time.Millisecond)
time.Sleep(10 * time.Millisecond)
} else {
ms := rand.Intn(10)
clockSource.Increment(time.Duration(ms) * time.Millisecond)
}
if leader != -1 {
// after a successful proposal, kill a random node
s := rand.Intn(5) + 1
if _, ok := idleNodes[s]; !ok {
id := uint64(s)
nodes[id].Server.Stop()
nodes[id].Shutdown()
idleNodes[s] = struct{}{}
nup--
if s == leader {
// leader is killed
leader = -1
}
}
}
if nup < 3 {
// if quorum is lost, try to bring back a node
s := rand.Intn(5) + 1
if _, ok := idleNodes[s]; ok {
id := uint64(s)
nodes[id] = raftutils.RestartNode(t, clockSource, nodes[id], false)
delete(idleNodes, s)
nup++
}
}
}
// bring back all nodes and propose the final value
for i := range idleNodes {
id := uint64(i)
nodes[id] = raftutils.RestartNode(t, clockSource, nodes[id], false)
}
raftutils.WaitForCluster(t, clockSource, nodes)
id := strconv.Itoa(1000)
val, err := raftutils.ProposeValue(t, raftutils.Leader(nodes), id)
assert.NoError(t, err, "failed to propose value")
pIDs = append(pIDs, id)
// advance the clock to let the cluster stabilize
time.Sleep(500 * time.Millisecond)
clockSource.Increment(500 * time.Millisecond)
ids, values := raftutils.GetAllValuesOnNode(t, clockSource, nodes[1])
// since the cluster is stable, the final value must be in the Raft store
find := false
for _, value := range values {
if reflect.DeepEqual(value, val) {
find = true
//......... remaining code omitted .........