This article collects typical usage examples of the Go function github.com/docker/swarmkit/manager/state/raft/testutils.AddRaftNode. If you have been wondering what exactly AddRaftNode does, how to call it, or what real uses of it look like, the curated code samples here may help.
Six code examples of the AddRaftNode function are shown below, sorted by popularity by default.
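The examples below all share one scaffold: raftutils.NewRaftCluster brings up a 3-node test cluster, and each AddRaftNode call joins one more member to it. Here is a minimal sketch of that pattern, distilled from the examples that follow (the test name TestAddRaftNodeSketch is invented for illustration, and the tc test CA is assumed to be set up as in the examples; every raftutils call shown appears in the source examples):

func TestAddRaftNodeSketch(t *testing.T) {
	t.Parallel()

	// NewRaftCluster starts a 3-node cluster; each AddRaftNode call joins
	// one more member, so two calls produce a 5-node cluster.
	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// The cluster is now usable, e.g. for proposing a value through the
	// leader and checking that it is replicated to every member.
	value, err := raftutils.ProposeValue(t, raftutils.Leader(nodes))
	assert.NoError(t, err)
	for _, node := range nodes {
		raftutils.CheckValue(t, clockSource, node, value)
	}
}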
Example 1: TestRaftQuorumRecovery
func TestRaftQuorumRecovery(t *testing.T) {
	t.Parallel()

	// Bring up a 5-node cluster
	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Lose a majority
	for i := uint64(1); i <= 3; i++ {
		nodes[i].Server.Stop()
		nodes[i].Shutdown()
	}

	raftutils.AdvanceTicks(clockSource, 5)

	// Restore the majority by restarting node 3
	nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)
	delete(nodes, 1)
	delete(nodes, 2)

	raftutils.WaitForCluster(t, clockSource, nodes)

	// Propose a value
	value, err := raftutils.ProposeValue(t, raftutils.Leader(nodes))
	assert.NoError(t, err)
	for _, node := range nodes {
		raftutils.CheckValue(t, clockSource, node, value)
	}
}
Example 2: TestRaftFollowerLeave
func TestRaftFollowerLeave(t *testing.T) {
	t.Parallel()

	// Bring up a 5-node cluster
	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Node 5 leaves the cluster.
	// Use gRPC instead of calling the handler directly because of the
	// authorization check.
	cc, err := dial(nodes[1], nodes[1].Address)
	assert.NoError(t, err)
	raftClient := api.NewRaftMembershipClient(cc)
	defer cc.Close()
	ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
	resp, err := raftClient.Leave(ctx, &api.LeaveRequest{Node: &api.RaftMember{RaftID: nodes[5].Config.ID}})
	assert.NoError(t, err, "error sending message to leave the raft")
	assert.NotNil(t, resp, "leave response message is nil")

	raftutils.ShutdownNode(nodes[5])
	delete(nodes, 5)

	raftutils.WaitForPeerNumber(t, clockSource, nodes, 4)

	// Propose a value
	value, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime)
	assert.NoError(t, err, "failed to propose value")

	// The value should be replicated on every node
	raftutils.CheckValue(t, clockSource, nodes[1], value)
	assert.Len(t, nodes[1].GetMemberlist(), 4)
	raftutils.CheckValue(t, clockSource, nodes[2], value)
	assert.Len(t, nodes[2].GetMemberlist(), 4)
	raftutils.CheckValue(t, clockSource, nodes[3], value)
	assert.Len(t, nodes[3].GetMemberlist(), 4)
	raftutils.CheckValue(t, clockSource, nodes[4], value)
	assert.Len(t, nodes[4].GetMemberlist(), 4)
}
Example 3: TestRaftQuorumFailure
func TestRaftQuorumFailure(t *testing.T) {
	t.Parallel()

	// Bring up a 5-node cluster
	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Lose a majority
	for i := uint64(3); i <= 5; i++ {
		nodes[i].Server.Stop()
		nodes[i].Stop()
	}

	// Propose a value
	_, err := raftutils.ProposeValue(t, nodes[1])
	assert.Error(t, err)

	// The value should not be replicated; there is no majority
	raftutils.CheckNoValue(t, clockSource, nodes[2])
	raftutils.CheckNoValue(t, clockSource, nodes[1])
}
Example 4: TestRaftNewNodeGetsData
func TestRaftNewNodeGetsData(t *testing.T) {
	t.Parallel()

	// Bring up a 3-node cluster
	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Propose a value
	value, err := raftutils.ProposeValue(t, nodes[1])
	assert.NoError(t, err, "failed to propose value")

	// Add a new node
	raftutils.AddRaftNode(t, clockSource, nodes, tc)

	time.Sleep(500 * time.Millisecond)

	// The value should be replicated on every node
	for _, node := range nodes {
		raftutils.CheckValue(t, clockSource, node, value)
		assert.Equal(t, len(node.GetMemberlist()), 4)
	}
}
Example 5: TestStress
func TestStress(t *testing.T) {
	t.Parallel()

	// Bring up a 5-node cluster
	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// number of nodes that are currently running
	nup := len(nodes)
	// record of nodes that are down
	idleNodes := map[int]struct{}{}
	// record of IDs that were proposed successfully or timed out
	pIDs := []string{}
	leader := -1

	for iters := 0; iters < 1000; iters++ {
		// keep proposing new values and killing the leader
		for i := 1; i <= 5; i++ {
			if nodes[uint64(i)] != nil {
				id := strconv.Itoa(iters)
				_, err := raftutils.ProposeValue(t, nodes[uint64(i)], id)
				if err == nil {
					pIDs = append(pIDs, id)
					// a successful proposal implies at least 3 nodes are running
					assert.True(t, nup >= 3)
					// only the leader can propose a value
					assert.True(t, leader == i || leader == -1)
					// update the leader
					leader = i
					break
				} else if strings.Contains(err.Error(), "context deadline exceeded") {
					// even though the proposal timed out, record the value anyway:
					// it may still be committed to Raft some time later
					pIDs = append(pIDs, id)
				}
			}
		}

		if rand.Intn(100) < 10 {
			// advance the clock so a potential election finishes quickly
			clockSource.Increment(200 * time.Millisecond)
			time.Sleep(10 * time.Millisecond)
		} else {
			ms := rand.Intn(10)
			clockSource.Increment(time.Duration(ms) * time.Millisecond)
		}

		if leader != -1 {
			// after a successful proposal, try to kill a random node
			s := rand.Intn(5) + 1
			if _, ok := idleNodes[s]; !ok {
				id := uint64(s)
				nodes[id].Server.Stop()
				nodes[id].Shutdown()
				idleNodes[s] = struct{}{}
				nup--
				if s == leader {
					// the leader was killed
					leader = -1
				}
			}
		}

		if nup < 3 {
			// if quorum is lost, try to bring back a node
			s := rand.Intn(5) + 1
			if _, ok := idleNodes[s]; ok {
				id := uint64(s)
				nodes[id] = raftutils.RestartNode(t, clockSource, nodes[id], false)
				delete(idleNodes, s)
				nup++
			}
		}
	}

	// bring back all nodes and propose the final value
	for i := range idleNodes {
		id := uint64(i)
		nodes[id] = raftutils.RestartNode(t, clockSource, nodes[id], false)
	}
	raftutils.WaitForCluster(t, clockSource, nodes)

	id := strconv.Itoa(1000)
	val, err := raftutils.ProposeValue(t, raftutils.Leader(nodes), id)
	assert.NoError(t, err, "failed to propose value")
	pIDs = append(pIDs, id)

	// advance the clock to let the cluster stabilize
	time.Sleep(500 * time.Millisecond)
	clockSource.Increment(500 * time.Millisecond)

	ids, values := raftutils.GetAllValuesOnNode(t, clockSource, nodes[1])

	// since the cluster is stable, the final value must be in the raft store
	find := false
	for _, value := range values {
		if reflect.DeepEqual(value, val) {
			find = true
//......... remaining code omitted .........
Example 6: TestListManagerNodes
func TestListManagerNodes(t *testing.T) {
	t.Parallel()

	tc := cautils.NewTestCA(nil)
	defer tc.Stop()
	ts := newTestServer(t)
	defer ts.Stop()
	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Create a node object for each of the managers
	assert.NoError(t, nodes[1].MemoryStore().Update(func(tx store.Tx) error {
		assert.NoError(t, store.CreateNode(tx, &api.Node{ID: nodes[1].SecurityConfig.ClientTLSCreds.NodeID()}))
		assert.NoError(t, store.CreateNode(tx, &api.Node{ID: nodes[2].SecurityConfig.ClientTLSCreds.NodeID()}))
		assert.NoError(t, store.CreateNode(tx, &api.Node{ID: nodes[3].SecurityConfig.ClientTLSCreds.NodeID()}))
		return nil
	}))

	// Assign one of the raft nodes to the test server
	ts.Server.raft = nodes[1].Node
	ts.Server.store = nodes[1].MemoryStore()

	// There should be 3 reachable managers listed
	r, err := ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{})
	assert.NoError(t, err)
	assert.NotNil(t, r)
	managers := getMap(t, r.Nodes)
	assert.Len(t, ts.Server.raft.GetMemberlist(), 3)
	assert.Len(t, r.Nodes, 3)

	// Node 1 should be the leader
	for i := 1; i <= 3; i++ {
		if i == 1 {
			assert.True(t, managers[nodes[uint64(i)].Config.ID].Leader)
			continue
		}
		assert.False(t, managers[nodes[uint64(i)].Config.ID].Leader)
	}

	// All nodes should be reachable
	for i := 1; i <= 3; i++ {
		assert.Equal(t, api.RaftMemberStatus_REACHABLE, managers[nodes[uint64(i)].Config.ID].Reachability)
	}

	// Add two more nodes to the cluster
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	raftutils.WaitForCluster(t, clockSource, nodes)

	// Add node entries for these
	assert.NoError(t, nodes[1].MemoryStore().Update(func(tx store.Tx) error {
		assert.NoError(t, store.CreateNode(tx, &api.Node{ID: nodes[4].SecurityConfig.ClientTLSCreds.NodeID()}))
		assert.NoError(t, store.CreateNode(tx, &api.Node{ID: nodes[5].SecurityConfig.ClientTLSCreds.NodeID()}))
		return nil
	}))

	// There should be 5 reachable managers listed
	r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{})
	assert.NoError(t, err)
	assert.NotNil(t, r)
	managers = getMap(t, r.Nodes)
	assert.Len(t, ts.Server.raft.GetMemberlist(), 5)
	assert.Len(t, r.Nodes, 5)
	for i := 1; i <= 5; i++ {
		assert.Equal(t, api.RaftMemberStatus_REACHABLE, managers[nodes[uint64(i)].Config.ID].Reachability)
	}

	// Stop 2 nodes
	nodes[4].Server.Stop()
	nodes[4].ShutdownRaft()
	nodes[5].Server.Stop()
	nodes[5].ShutdownRaft()

	// Nodes 4 and 5 should be listed as unreachable
	assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
		r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{})
		if err != nil {
			return err
		}
		managers = getMap(t, r.Nodes)
		if len(r.Nodes) != 5 {
			return fmt.Errorf("expected 5 nodes, got %d", len(r.Nodes))
		}
		if managers[nodes[4].Config.ID].Reachability == api.RaftMemberStatus_REACHABLE {
			return fmt.Errorf("expected node 4 to be unreachable")
		}
		if managers[nodes[5].Config.ID].Reachability == api.RaftMemberStatus_REACHABLE {
			return fmt.Errorf("expected node 5 to be unreachable")
		}
		return nil
	}))

	// Restart the 2 nodes
	nodes[4] = raftutils.RestartNode(t, clockSource, nodes[4], false)
//......... remaining code omitted .........