本文整理匯總了Golang中github.com/docker/swarmkit/manager/state/raft/testutils.TeardownCluster函數的典型用法代碼示例。如果您正苦於以下問題:Golang TeardownCluster函數的具體用法?Golang TeardownCluster怎麽用?Golang TeardownCluster使用的例子?那麽, 這裏精選的函數代碼示例或許可以為您提供幫助。
在下文中一共展示了TeardownCluster函數的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Golang代碼示例。
示例1: TestRaftQuorumRecovery
// TestRaftQuorumRecovery verifies that a 5-node cluster which has lost a
// majority (3 of 5 members) regains quorum once one of the stopped nodes
// is restarted and the two permanently-lost members are dropped.
func TestRaftQuorumRecovery(t *testing.T) {
	t.Parallel()

	// Build a 5-node cluster: start with 3 members and add two more.
	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Take down nodes 1-3 so the cluster loses its majority.
	for id := uint64(1); id <= 3; id++ {
		nodes[id].Server.Stop()
		nodes[id].Shutdown()
	}
	raftutils.AdvanceTicks(clockSource, 5)

	// Bring node 3 back; with nodes 1 and 2 removed for good, members
	// 3, 4 and 5 form a 3-of-5 majority and quorum is restored.
	nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)
	delete(nodes, 1)
	delete(nodes, 2)
	raftutils.WaitForCluster(t, clockSource, nodes)

	// A proposal must now succeed and replicate to every live node.
	value, err := raftutils.ProposeValue(t, raftutils.Leader(nodes))
	assert.NoError(t, err)
	for _, node := range nodes {
		raftutils.CheckValue(t, clockSource, node, value)
	}
}
示例2: testRaftRestartCluster
// testRaftRestartCluster proposes a value, stops every node in the
// cluster, restarts them all, and verifies that both the pre-restart and
// a post-restart proposal are present in every node's store. When
// stagger is true, restarts are separated by one clock tick so nodes
// come back at different times and elections are exercised differently.
func testRaftRestartCluster(t *testing.T, stagger bool) {
nodes, clockSource := raftutils.NewRaftCluster(t, tc)
defer raftutils.TeardownCluster(t, nodes)
// Propose a value while the cluster is still healthy.
values := make([]*api.Node, 2)
var err error
values[0], err = raftutils.ProposeValue(t, nodes[1], "id1")
assert.NoError(t, err, "failed to propose value")
// Stop all nodes
for _, node := range nodes {
node.Server.Stop()
node.Shutdown()
}
raftutils.AdvanceTicks(clockSource, 5)
// Restart all nodes, optionally one tick apart (stagger).
i := 0
for k, node := range nodes {
if stagger && i != 0 {
raftutils.AdvanceTicks(clockSource, 1)
}
nodes[k] = raftutils.RestartNode(t, clockSource, node, false)
i++
}
raftutils.WaitForCluster(t, clockSource, nodes)
// Propose another value after the restart.
values[1], err = raftutils.ProposeValue(t, raftutils.Leader(nodes), "id2")
assert.NoError(t, err, "failed to propose value")
// Poll each node until its store holds exactly both proposed values.
for _, node := range nodes {
assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
// Deliberately shadows the outer err: this closure-local error is
// what PollFunc inspects on each poll iteration.
var err error
node.MemoryStore().View(func(tx store.ReadTx) {
var allNodes []*api.Node
allNodes, err = store.FindNodes(tx, store.All)
if err != nil {
return
}
if len(allNodes) != 2 {
err = fmt.Errorf("expected 2 nodes, got %d", len(allNodes))
return
}
// Both the pre-restart ("id1") and post-restart ("id2") values
// must match exactly what was proposed.
for i, nodeID := range []string{"id1", "id2"} {
n := store.GetNode(tx, nodeID)
if !reflect.DeepEqual(n, values[i]) {
err = fmt.Errorf("node %s did not match expected value", nodeID)
return
}
}
})
return err
}))
}
}
示例3: TestRaftBootstrap
// TestRaftBootstrap checks that a freshly bootstrapped 3-node cluster
// reports all three members in every node's member list.
func TestRaftBootstrap(t *testing.T) {
	t.Parallel()

	nodes, _ := raftutils.NewRaftCluster(t, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Each member must see the full 3-node membership.
	for id := uint64(1); id <= 3; id++ {
		assert.Equal(t, 3, len(nodes[id].GetMemberlist()))
	}
}
示例4: TestRaftEncryptionKeyRotationStress
// TestRaftEncryptionKeyRotationStress rotates the encryption key and restarts
// a node repeatedly while values are constantly being proposed — the intent
// is to try to trigger race conditions, since with more than one node
// consensus may take longer.
func TestRaftEncryptionKeyRotationStress(t *testing.T) {
	t.Parallel()

	// Bring up a 3-node cluster.
	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	defer raftutils.TeardownCluster(t, nodes)
	leader := nodes[1]

	// Constantly propose values in the background while keys rotate.
	done, stop, restart, clusterReady := make(chan struct{}), make(chan struct{}), make(chan struct{}), make(chan struct{})
	go func() {
		counter := len(nodes)
		for {
			select {
			case <-stop:
				close(done)
				return
			case <-restart:
				// The node restarts may trigger a leadership change, so wait
				// until the cluster has 3 nodes again and a leader is
				// selected before proposing more values.
				<-clusterReady
				leader = raftutils.Leader(nodes)
			default:
				counter++ // idiomatic Go increment (was counter += 1)
				// The proposal error is ignored on purpose: proposals racing
				// a restart or a leadership change are expected to fail.
				raftutils.ProposeValue(t, leader, DefaultProposalTime, fmt.Sprintf("id%d", counter))
			}
		}
	}()

	for i := 0; i < 30; i++ {
		// Rotate the encryption key and wait until the rotation completes
		// (the pending DEK is promoted and cleared).
		nodes[3].KeyRotator.QueuePendingKey([]byte(fmt.Sprintf("newKey%d", i)))
		nodes[3].KeyRotator.RotationNotify() <- struct{}{}
		require.NoError(t, raftutils.PollFunc(clockSource, func() error {
			if nodes[3].KeyRotator.GetKeys().PendingDEK == nil {
				return nil
			}
			return fmt.Errorf("not done rotating yet")
		}))

		// Restart the node and wait for everything to settle and a leader
		// to be elected before signaling the proposer to resume.
		nodes[3].Server.Stop()
		nodes[3].ShutdownRaft()
		restart <- struct{}{}
		nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)
		raftutils.AdvanceTicks(clockSource, 1)
		raftutils.WaitForCluster(t, clockSource, nodes)
		clusterReady <- struct{}{}
	}

	close(stop)
	<-done
}
示例5: TestCanRemoveMember
// TestCanRemoveMember verifies the guard against removing raft members when
// doing so would break quorum: removal is rejected while a majority of
// members is unreachable, and allowed once quorum is re-established.
func TestCanRemoveMember(t *testing.T) {
	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Stop node 2 and node 3 (2 nodes out of 3).
	nodes[2].Server.Stop()
	nodes[2].Shutdown()
	nodes[3].Server.Stop()
	nodes[3].Shutdown()

	// Node 2 and node 3 should eventually be listed as unreachable.
	assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
		members := nodes[1].GetMemberlist()
		if len(members) != 3 {
			return fmt.Errorf("expected 3 nodes, got %d", len(members))
		}
		if members[nodes[2].Config.ID].Status.Reachability == api.RaftMemberStatus_REACHABLE {
			return fmt.Errorf("expected node 2 to be unreachable")
		}
		if members[nodes[3].Config.ID].Status.Reachability == api.RaftMemberStatus_REACHABLE {
			return fmt.Errorf("expected node 3 to be unreachable")
		}
		return nil
	}))

	// Removing node 3 should fail: with 2 of 3 members down there is no
	// quorum, so the removal cannot be committed.
	// NOTE: capture and defer cancel — the original discarded the
	// CancelFunc, which leaks the context's timer (go vet lostcancel).
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	err := nodes[1].RemoveMember(ctx, 3)
	assert.Error(t, err)
	assert.Equal(t, raft.ErrCannotRemoveMember, err)
	members := nodes[1].GetMemberlist()
	assert.Equal(t, 3, len(members))

	// Restart node 2 and node 3 to restore quorum.
	nodes[2] = raftutils.RestartNode(t, clockSource, nodes[2], false)
	nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)
	raftutils.WaitForCluster(t, clockSource, nodes)

	// Removing node 3 should now succeed.
	ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	err = nodes[1].RemoveMember(ctx, nodes[3].Config.ID)
	assert.NoError(t, err)
	members = nodes[1].GetMemberlist()
	assert.Nil(t, members[nodes[3].Config.ID])
	assert.Equal(t, 2, len(members))

	// Removing node 2 should fail: it would leave one member out of two,
	// again breaking quorum.
	ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	err = nodes[1].RemoveMember(ctx, nodes[2].Config.ID)
	assert.Error(t, err)
	assert.Equal(t, raft.ErrCannotRemoveMember, err)
	// BUG FIX: refetch the member list — the original asserted on the
	// stale list fetched before the failed RemoveMember call, which would
	// mask a wrongly-applied removal.
	members = nodes[1].GetMemberlist()
	assert.Equal(t, 2, len(members))
}
示例6: TestRaftLeaderDown
// TestRaftLeaderDown stops the current leader and verifies that the two
// surviving nodes elect a new leader, agree on it, and can still commit
// and replicate a proposal.
func TestRaftLeaderDown(t *testing.T) {
	t.Parallel()

	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Take the current leader (node 1) offline.
	nodes[1].Stop()

	remaining := map[uint64]*raftutils.TestNode{
		2: nodes[2],
		3: nodes[3],
	}

	// Wait for the re-election among the survivors.
	raftutils.WaitForCluster(t, clockSource, remaining)

	// The stopped node must not be reported as leader anymore...
	assert.NotEqual(t, nodes[2].Leader(), nodes[1].Config.ID)
	// ...and both survivors must agree on who leads.
	assert.Equal(t, nodes[3].Leader(), nodes[2].Leader())

	// Identify the leader and the follower among the survivors.
	var leaderNode, followerNode *raftutils.TestNode
	for id, n := range remaining {
		if n.Config.ID != n.Leader() {
			continue
		}
		leaderNode = n
		if id == 2 {
			followerNode = remaining[3]
		} else {
			followerNode = remaining[2]
		}
	}
	require.NotNil(t, leaderNode)
	require.NotNil(t, followerNode)

	// A proposal through the new leader must succeed...
	value, err := raftutils.ProposeValue(t, leaderNode)
	assert.NoError(t, err, "failed to propose value")

	// ...and be replicated on both remaining nodes, which still know of
	// all 3 configured members.
	raftutils.CheckValue(t, clockSource, leaderNode, value)
	assert.Equal(t, len(leaderNode.GetMemberlist()), 3)
	raftutils.CheckValue(t, clockSource, followerNode, value)
	assert.Equal(t, len(followerNode.GetMemberlist()), 3)
}
示例7: TestRaftLeader
// TestRaftLeader checks that node 1 (the bootstrap node) holds leadership
// and that every cluster member agrees on that.
func TestRaftLeader(t *testing.T) {
	t.Parallel()

	nodes, _ := raftutils.NewRaftCluster(t, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Node 1 bootstrapped the cluster and must be the leader.
	assert.True(t, nodes[1].IsLeader(), "error: node 1 is not the Leader")

	// All members must report node 1 as the leader.
	for id := uint64(1); id <= 3; id++ {
		assert.Equal(t, nodes[id].Leader(), nodes[1].Config.ID)
	}
}
示例8: TestRaftLogReplication
// TestRaftLogReplication proposes a value through the leader and verifies
// that it reaches the physical store of every cluster member.
func TestRaftLogReplication(t *testing.T) {
	t.Parallel()

	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Commit a value via the leader (node 1).
	value, err := raftutils.ProposeValue(t, nodes[1])
	assert.NoError(t, err, "failed to propose value")

	// Every member must end up with the value in its physical store.
	for id := uint64(1); id <= 3; id++ {
		raftutils.CheckValue(t, clockSource, nodes[id], value)
	}
}
示例9: TestRaftUnreachableNode
// TestRaftUnreachableNode verifies that a node whose gRPC server is
// stopped right after joining is retried by the leader and catches up
// once a new server is started on a recycled listener at the same
// address.
func TestRaftUnreachableNode(t *testing.T) {
t.Parallel()
nodes := make(map[uint64]*raftutils.TestNode)
var clockSource *fakeclock.FakeClock
nodes[1], clockSource = raftutils.NewInitNode(t, tc, nil)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Add a new node
nodes[2] = raftutils.NewNode(t, clockSource, tc, raft.NodeOptions{JoinAddr: nodes[1].Address})
err := nodes[2].JoinAndStart(ctx)
require.NoError(t, err, "can't join cluster")
go nodes[2].Run(ctx)
// Stop the Raft server of second node on purpose after joining, so the
// leader sees it as unreachable while the raft node keeps running.
nodes[2].Server.Stop()
nodes[2].Listener.Close()
// Let election/heartbeat ticks elapse while node 2 is unreachable.
raftutils.AdvanceTicks(clockSource, 5)
time.Sleep(100 * time.Millisecond)
// Bring the listener back on the same address and attach a brand-new
// gRPC server to it, re-registering node 2's raft service.
wrappedListener := raftutils.RecycleWrappedListener(nodes[2].Listener)
securityConfig := nodes[2].SecurityConfig
serverOpts := []grpc.ServerOption{grpc.Creds(securityConfig.ServerTLSCreds)}
s := grpc.NewServer(serverOpts...)
nodes[2].Server = s
raft.Register(s, nodes[2].Node)
go func() {
// After stopping, we should receive an error from Serve
assert.Error(t, s.Serve(wrappedListener))
}()
raftutils.WaitForCluster(t, clockSource, nodes)
defer raftutils.TeardownCluster(t, nodes)
// Propose a value
value, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime)
assert.NoError(t, err, "failed to propose value")
// All nodes should have the value in the physical store, proving node 2
// caught up after its outage.
raftutils.CheckValue(t, clockSource, nodes[1], value)
raftutils.CheckValue(t, clockSource, nodes[2], value)
}
示例10: TestRaftLogReplicationWithoutLeader
// TestRaftLogReplicationWithoutLeader verifies that a proposal fails and
// nothing is committed to any store while the cluster has no leader.
func TestRaftLogReplicationWithoutLeader(t *testing.T) {
	t.Parallel()

	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Take the leader (node 1) down.
	nodes[1].Stop()

	// A proposal routed through a follower must be rejected.
	_, err := raftutils.ProposeValue(t, nodes[2])
	assert.Error(t, err)

	// In the absence of a leader, neither surviving node may have
	// committed anything to its store.
	for _, follower := range []*raftutils.TestNode{nodes[2], nodes[3]} {
		raftutils.CheckNoValue(t, clockSource, follower)
	}
}
示例11: TestRaftFollowerLeave
// TestRaftFollowerLeave has a follower leave a 5-node cluster through the
// RaftMembership gRPC API and verifies that membership shrinks to 4 members
// and that the cluster still commits and replicates values afterwards.
func TestRaftFollowerLeave(t *testing.T) {
	t.Parallel()

	// Bring up a 5-node cluster.
	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Node 5 leaves the cluster.
	// Use gRPC instead of calling the handler directly because of the
	// authorization check.
	cc, err := dial(nodes[1], nodes[1].Address)
	assert.NoError(t, err)
	raftClient := api.NewRaftMembershipClient(cc)
	defer cc.Close()

	// Capture and defer cancel — the original discarded the CancelFunc,
	// leaking the context's timer (go vet lostcancel).
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	resp, err := raftClient.Leave(ctx, &api.LeaveRequest{Node: &api.RaftMember{RaftID: nodes[5].Config.ID}})
	assert.NoError(t, err, "error sending message to leave the raft")
	assert.NotNil(t, resp, "leave response message is nil")

	raftutils.ShutdownNode(nodes[5])
	delete(nodes, 5)
	raftutils.WaitForPeerNumber(t, clockSource, nodes, 4)

	// Propose a value through the remaining cluster.
	value, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime)
	assert.NoError(t, err, "failed to propose value")

	// The value should be replicated on every remaining node, and each
	// node should see exactly 4 members.
	for id := uint64(1); id <= 4; id++ {
		raftutils.CheckValue(t, clockSource, nodes[id], value)
		assert.Len(t, nodes[id].GetMemberlist(), 4)
	}
}
示例12: TestRaftJoinTwice
// TestRaftJoinTwice verifies that a member that already belongs to the
// cluster cannot join a second time: the Join RPC must fail with
// codes.AlreadyExists and a descriptive message.
func TestRaftJoinTwice(t *testing.T) {
	t.Parallel()

	nodes, _ := raftutils.NewRaftCluster(t, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Node 3 tries to join again.
	// Use gRPC instead of calling the handler directly because of the
	// authorization check.
	cc, err := dial(nodes[3], nodes[1].Address)
	assert.NoError(t, err)
	raftClient := api.NewRaftMembershipClient(cc)
	defer cc.Close()

	// Capture and defer cancel — the original discarded the CancelFunc,
	// leaking the context's timer (go vet lostcancel).
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	_, err = raftClient.Join(ctx, &api.JoinRequest{})
	assert.Error(t, err, "expected error on duplicate Join")
	assert.Equal(t, grpc.Code(err), codes.AlreadyExists)
	assert.Equal(t, grpc.ErrorDesc(err), "a raft member with this node ID already exists")
}
示例13: TestRaftWipedState
// TestRaftWipedState stops a node, wipes its on-disk raft state, and
// restarts it, making sure the restarted node does not panic despite
// having lost all of its state.
func TestRaftWipedState(t *testing.T) {
	t.Parallel()

	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Stop node 3.
	nodes[3].Server.Stop()
	nodes[3].ShutdownRaft()

	// Remove its state. Check the error: the original ignored it, but the
	// test is meaningless if the state was not actually wiped.
	if err := os.RemoveAll(nodes[3].StateDir); err != nil {
		t.Fatalf("failed to wipe state dir: %v", err)
	}

	raftutils.AdvanceTicks(clockSource, 5)

	// Restart node 3 with an empty state directory.
	nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)

	// Keep polling for a second purely to let the restarted node run; the
	// returned error is ignored on purpose — the poll func always errors
	// and we only care that nothing panics in the meantime.
	raftutils.PollFuncWithTimeout(clockSource, func() error { return errors.New("keep the poll going") }, time.Second)
}
示例14: TestRaftNewNodeGetsData
// TestRaftNewNodeGetsData checks that a member added after a value has
// already been committed receives the existing log data via replication.
func TestRaftNewNodeGetsData(t *testing.T) {
	t.Parallel()

	// Start with a 3-node cluster.
	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Commit a value before the new member joins.
	value, err := raftutils.ProposeValue(t, nodes[1])
	assert.NoError(t, err, "failed to propose value")

	// Join a fourth node and give replication a moment to catch up.
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	time.Sleep(500 * time.Millisecond)

	// Every member — including the newcomer — must hold the value and see
	// the full 4-node membership.
	for _, member := range nodes {
		raftutils.CheckValue(t, clockSource, member, value)
		assert.Equal(t, len(member.GetMemberlist()), 4)
	}
}
示例15: TestRaftQuorumFailure
// TestRaftQuorumFailure checks that a 5-node cluster which has lost a
// majority (3 of 5 members) can no longer commit proposals.
func TestRaftQuorumFailure(t *testing.T) {
	t.Parallel()

	// Grow the initial 3-node cluster to 5 members.
	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Take down nodes 3, 4 and 5 — a majority of the cluster.
	for id := uint64(3); id <= 5; id++ {
		nodes[id].Server.Stop()
		nodes[id].Stop()
	}

	// Without quorum a proposal must fail...
	_, err := raftutils.ProposeValue(t, nodes[1])
	assert.Error(t, err)

	// ...and nothing may appear in the stores of the surviving nodes.
	raftutils.CheckNoValue(t, clockSource, nodes[2])
	raftutils.CheckNoValue(t, clockSource, nodes[1])
}