This page collects typical usage examples of the RaftID function from the Golang package github.com/cockroachdb/cockroach/proto. If you are asking yourself what RaftID does, how to use it, or where to find real-world calls, the curated examples below should help.
The following shows 15 code examples of the RaftID function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code samples.
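One thing worth knowing up front: in this era of the CockroachDB codebase, proto.RaftID appears to be a typed int64 naming a raft consensus group (it was later renamed RangeID), so the "function" being called in every example below is really just a Go type conversion. A minimal self-contained sketch, with a local stand-in for the proto type:

package main

import "fmt"

// RaftID mirrors github.com/cockroachdb/cockroach/proto.RaftID for
// illustration only: a typed int64 identifying a raft consensus group.
type RaftID int64

func main() {
	// The "RaftID function" seen throughout the examples is a type conversion.
	groupID := RaftID(1)
	fmt.Printf("raft group %d\n", groupID)
}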
Example 1: TestHeartbeatResponseFanout
// TestHeartbeatResponseFanout checks two raft groups with the same node
// distribution but different Terms; a heartbeat response from one group
// should not disturb the other group's Term or leadership.
func TestHeartbeatResponseFanout(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	defer stopper.Stop()
	cluster := newTestCluster(nil, 3, stopper, t)
	groupID1 := proto.RaftID(1)
	cluster.createGroup(groupID1, 0, 3 /* replicas */)
	groupID2 := proto.RaftID(2)
	cluster.createGroup(groupID2, 0, 3 /* replicas */)
	leaderIndex := 0
	cluster.elect(leaderIndex, groupID1)
	// groupID2 goes through 3 rounds of elections, so it ends up with a
	// different term than groupID1, but both leaders are on the same node.
	for i := 2; i >= 0; i-- {
		leaderIndex = i
		cluster.elect(leaderIndex, groupID2)
	}
	// Send a coalesced heartbeat. The heartbeat response from groupID2 will
	// carry a higher term than the one from groupID1.
	cluster.nodes[0].coalescedHeartbeat()
	// Submit a command to see whether groupID1's leader has changed.
	cluster.nodes[0].SubmitCommand(groupID1, makeCommandID(), []byte("command"))
	select {
	case <-cluster.events[0].CommandCommitted:
		log.Infof("SubmitCommand succeeded after heartbeat response fanout")
	case <-time.After(500 * time.Millisecond):
		t.Fatalf("no leader after heartbeat response fanout")
	}
}
Example 2: TestHeartbeatResponseFanout
// TestHeartbeatResponseFanout checks two raft groups with the same node
// distribution but different Terms; a heartbeat response from one group
// should not disturb the other group's Term or leadership.
func TestHeartbeatResponseFanout(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := util.NewStopper()
	defer stopper.Stop()
	cluster := newTestCluster(nil, 3, stopper, t)
	groupID1 := proto.RaftID(1)
	cluster.createGroup(groupID1, 0, 3 /* replicas */)
	groupID2 := proto.RaftID(2)
	cluster.createGroup(groupID2, 0, 3 /* replicas */)
	leaderIndex := 0
	cluster.triggerElection(leaderIndex, groupID1)
	event := cluster.waitForElection(leaderIndex)
	// Drain off the election event from the other nodes.
	_ = cluster.waitForElection((leaderIndex + 1) % 3)
	_ = cluster.waitForElection((leaderIndex + 2) % 3)
	if event.GroupID != groupID1 {
		t.Fatalf("election event had incorrect group ID %v", event.GroupID)
	}
	if event.NodeID != cluster.nodes[leaderIndex].nodeID {
		t.Fatalf("expected %v to win election, but was %v", cluster.nodes[leaderIndex].nodeID, event.NodeID)
	}
	// groupID2 goes through 3 rounds of elections, so it ends up with a
	// different term than groupID1, but both leaders are on the same node.
	for i := 2; i >= 0; i-- {
		leaderIndex = i
		cluster.triggerElection(leaderIndex, groupID2)
		event = cluster.waitForElection(leaderIndex)
		_ = cluster.waitForElection((leaderIndex + 1) % 3)
		_ = cluster.waitForElection((leaderIndex + 2) % 3)
		if event.GroupID != groupID2 {
			t.Fatalf("election event had incorrect group ID %v", event.GroupID)
		}
		if event.NodeID != cluster.nodes[leaderIndex].nodeID {
			t.Fatalf("expected %v to win election, but was %v", cluster.nodes[leaderIndex].nodeID, event.NodeID)
		}
	}
	// Send a coalesced heartbeat. The heartbeat response from groupID2 will
	// carry a higher term than the one from groupID1.
	cluster.nodes[0].coalescedHeartbeat()
	// Submit a command to see whether groupID1's leader has changed.
	cluster.nodes[0].SubmitCommand(groupID1, makeCommandID(), []byte("command"))
	select {
	case <-cluster.events[0].CommandCommitted:
		log.Infof("SubmitCommand succeeded after heartbeat response fanout")
	case <-time.After(500 * time.Millisecond):
		t.Fatalf("no leader after heartbeat response fanout")
	}
}
Example 3: TestSlowStorage
func TestSlowStorage(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := util.NewStopper()
	cluster := newTestCluster(nil, 3, stopper, t)
	defer stopper.Stop()
	groupID := proto.RaftID(1)
	cluster.createGroup(groupID, 0, 3)
	cluster.triggerElection(0, groupID)
	cluster.waitForElection(0)
	// Block the storage on the last node.
	// TODO(bdarnell): there appear to still be issues if the storage is
	// blocked during the election.
	cluster.storages[2].Block()
	// Submit a command to the leader.
	cluster.nodes[0].SubmitCommand(groupID, makeCommandID(), []byte("command"))
	// Even with the third node blocked, the other nodes can make progress.
	for i := 0; i < 2; i++ {
		events := cluster.events[i]
		log.Infof("waiting for event to be committed on node %v", i)
		commit := <-events.CommandCommitted
		if string(commit.Command) != "command" {
			t.Errorf("unexpected value in committed command: %v", commit.Command)
		}
	}
	// Ensure that node 2 is in fact blocked.
	time.Sleep(time.Millisecond)
	select {
	case commit := <-cluster.events[2].CommandCommitted:
		t.Errorf("didn't expect commits on node 2 but got %v", commit)
	default:
	}
	// After unblocking the third node, it will catch up.
	cluster.storages[2].Unblock()
	log.Infof("waiting for event to be committed on node 2")
	// When we unblock, the backlog is not guaranteed to be processed in order,
	// and in some cases the leader may need to retransmit some messages.
	for i := 0; i < 3; i++ {
		select {
		case commit := <-cluster.events[2].CommandCommitted:
			if string(commit.Command) != "command" {
				t.Errorf("unexpected value in committed command: %v", commit.Command)
			}
			return
		case <-time.After(5 * time.Millisecond):
			// Tick both nodes' clocks. The ticks on the follower node don't
			// really do anything, but they do ensure that that goroutine is
			// getting scheduled (and the real-time delay allows RPC responses
			// to pass between the nodes).
			cluster.tickers[0].Tick()
			cluster.tickers[2].Tick()
		}
	}
}
Example 4: TestLeaderCache
func TestLeaderCache(t *testing.T) {
	defer leaktest.AfterTest(t)
	lc := newLeaderCache(3)
	if r := lc.Lookup(12); r.StoreID != 0 {
		t.Fatalf("lookup of missing key returned replica: %v", r)
	}
	replica := proto.Replica{StoreID: 1}
	lc.Update(5, replica)
	if r := lc.Lookup(5); r.StoreID != 1 {
		t.Errorf("expected %v, got %v", replica, r)
	}
	newReplica := proto.Replica{StoreID: 7}
	lc.Update(5, newReplica)
	r := lc.Lookup(5)
	if r.StoreID != 7 {
		t.Errorf("expected %v, got %v", newReplica, r)
	}
	lc.Update(5, proto.Replica{})
	r = lc.Lookup(5)
	if r.StoreID != 0 {
		t.Fatalf("evicted leader returned: %v", r)
	}
	for i := 10; i < 20; i++ {
		lc.Update(proto.RaftID(i), replica)
	}
	if lc.Lookup(16).StoreID != 0 || lc.Lookup(17).StoreID == 0 {
		t.Errorf("unexpected policy used in cache")
	}
}
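The test above implies newLeaderCache builds a bounded cache keyed by raft ID: updating with a zero Replica evicts the entry, and older entries age out once capacity is exceeded (16 is gone, 17 survives, in a size-3 cache fed 10 through 19). A minimal sketch under those assumptions; the real implementation is likely an LRU from cockroach's util/cache package, and everything below is illustrative:

package main

import "fmt"

type StoreID int32

type Replica struct{ StoreID StoreID }

// leaderCache is a hypothetical, minimal bounded cache matching the behavior
// the test exercises: Update with a zero Replica evicts, and inserting past
// the size bound drops the oldest entry.
type leaderCache struct {
	size    int
	order   []int64 // insertion order, oldest first
	entries map[int64]Replica
}

func newLeaderCache(size int) *leaderCache {
	return &leaderCache{size: size, entries: map[int64]Replica{}}
}

func (lc *leaderCache) Update(id int64, r Replica) {
	lc.remove(id)
	if r.StoreID == 0 {
		return // zero Replica means "evict"
	}
	if len(lc.order) >= lc.size {
		delete(lc.entries, lc.order[0])
		lc.order = lc.order[1:]
	}
	lc.order = append(lc.order, id)
	lc.entries[id] = r
}

func (lc *leaderCache) remove(id int64) {
	if _, ok := lc.entries[id]; !ok {
		return
	}
	delete(lc.entries, id)
	for i, o := range lc.order {
		if o == id {
			lc.order = append(lc.order[:i], lc.order[i+1:]...)
			break
		}
	}
}

func (lc *leaderCache) Lookup(id int64) Replica {
	return lc.entries[id] // zero Replica when absent
}

func main() {
	lc := newLeaderCache(3)
	lc.Update(5, Replica{StoreID: 1})
	fmt.Println(lc.Lookup(5).StoreID) // 1
	lc.Update(5, Replica{})
	fmt.Println(lc.Lookup(5).StoreID) // 0: evicted
}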
Example 5: sendAttempt
// sendAttempt is invoked by Send. It temporarily truncates the arguments to
// match the descriptor's EndKey (if necessary) and gathers and rearranges the
// replicas before making a single attempt at sending the request. It returns
// the result of sending the RPC; a potential error contained in the reply has
// to be handled separately by the caller.
func (ds *DistSender) sendAttempt(trace *tracer.Trace, args proto.Request, reply proto.Response, desc *proto.RangeDescriptor) error {
	defer trace.Epoch("sending RPC")()
	// Truncate the request to our current range, making sure not to
	// touch it unless we have to (it is illegal to send EndKey on
	// commands which do not operate on ranges).
	if endKey := args.Header().EndKey; endKey != nil && !endKey.Less(desc.EndKey) {
		defer func(k proto.Key) { args.Header().EndKey = k }(endKey)
		args.Header().EndKey = desc.EndKey
	}
	leader := ds.leaderCache.Lookup(proto.RaftID(desc.RaftID))
	// Try to send the call.
	replicas := newReplicaSlice(ds.gossip, desc)
	// Rearrange the replicas so that those replicas with long common
	// prefix of attributes end up first. If there's no prefix, this is a
	// no-op.
	order := ds.optimizeReplicaOrder(replicas)
	// If this request needs to go to a leader and we know who that is, move
	// it to the front.
	if !(proto.IsRead(args) && args.Header().ReadConsistency == proto.INCONSISTENT) &&
		leader.StoreID > 0 {
		if i := replicas.FindReplica(leader.StoreID); i >= 0 {
			replicas.MoveToFront(i)
			order = rpc.OrderStable
		}
	}
	return ds.sendRPC(trace, desc.RaftID, replicas, order, args, reply)
}
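The truncate-and-restore move at the top of sendAttempt is worth isolating: a field is narrowed for the duration of the call, and a deferred closure, which captures the original value as an argument, restores it on return. A self-contained sketch of just that pattern; the types here are illustrative stand-ins, not the proto package's:

package main

import (
	"bytes"
	"fmt"
)

type Key []byte

func (k Key) Less(other Key) bool { return bytes.Compare(k, other) < 0 }

type Header struct{ EndKey Key }

// sendTruncated narrows header.EndKey to rangeEnd while send runs, then
// restores the original value. Passing the old key as the deferred
// function's argument snapshots it at defer time.
func sendTruncated(header *Header, rangeEnd Key, send func() error) error {
	if endKey := header.EndKey; endKey != nil && !endKey.Less(rangeEnd) {
		defer func(k Key) { header.EndKey = k }(endKey)
		header.EndKey = rangeEnd
	}
	return send()
}

func main() {
	h := &Header{EndKey: Key("z")}
	_ = sendTruncated(h, Key("m"), func() error {
		fmt.Printf("during send: EndKey=%s\n", h.EndKey) // m
		return nil
	})
	fmt.Printf("after send: EndKey=%s\n", h.EndKey) // z
}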
Example 6: handleWriteReady
// handleWriteReady converts a set of raft.Ready structs into a writeRequest
// to be persisted, marks the group as writing and sends it to the writeTask.
func (s *state) handleWriteReady(readyGroups map[uint64]raft.Ready) {
	if log.V(6) {
		log.Infof("node %v write ready, preparing request", s.nodeID)
	}
	writeRequest := newWriteRequest()
	for groupID, ready := range readyGroups {
		raftGroupID := proto.RaftID(groupID)
		g, ok := s.groups[raftGroupID]
		if !ok {
			if log.V(6) {
				log.Infof("dropping write request to group %d", groupID)
			}
			continue
		}
		g.writing = true
		gwr := &groupWriteRequest{}
		if !raft.IsEmptyHardState(ready.HardState) {
			gwr.state = ready.HardState
		}
		if !raft.IsEmptySnap(ready.Snapshot) {
			gwr.snapshot = ready.Snapshot
		}
		if len(ready.Entries) > 0 {
			gwr.entries = ready.Entries
		}
		writeRequest.groups[raftGroupID] = gwr
	}
	s.writeTask.in <- writeRequest
}
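For contrast with this multi-group batching, the canonical single-group loop from the etcd/raft documentation persists HardState, Entries, and Snapshot and then acknowledges with Advance; handleWriteReady is the multi-group analogue, folding that per-group state into one writeRequest for the write task. A sketch against the coreos/etcd raft API of that era, where the save callback is a hypothetical stand-in for whatever storage layer you have:

import (
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
)

// runReadyLoop drains a single raft.Node's Ready channel, persisting state
// before calling Advance. Closing stop ends the loop.
func runReadyLoop(n raft.Node, save func(raftpb.HardState, []raftpb.Entry, raftpb.Snapshot), stop <-chan struct{}) {
	for {
		select {
		case rd := <-n.Ready():
			// HardState, Entries, and Snapshot must be durable before
			// messages are sent or committed entries applied.
			save(rd.HardState, rd.Entries, rd.Snapshot)
			// ...send rd.Messages, apply rd.CommittedEntries...
			n.Advance()
		case <-stop:
			return
		}
	}
}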
Example 7: newTestRangeSet
// newTestRangeSet creates a new range set containing count ranges.
func newTestRangeSet(count int, t *testing.T) *testRangeSet {
	rs := &testRangeSet{rangesByKey: btree.New(64 /* degree */)}
	for i := 0; i < count; i++ {
		desc := &proto.RangeDescriptor{
			RaftID:   proto.RaftID(i),
			StartKey: proto.Key(fmt.Sprintf("%03d", i)),
			EndKey:   proto.Key(fmt.Sprintf("%03d", i+1)),
		}
		// Initialize the range stats so the scanner can use them.
		rng := &Range{
			stats: &rangeStats{
				raftID: desc.RaftID,
				MVCCStats: engine.MVCCStats{
					KeyBytes:  1,
					ValBytes:  2,
					KeyCount:  1,
					LiveCount: 1,
				},
			},
		}
		if err := rng.setDesc(desc); err != nil {
			t.Fatal(err)
		}
		if exRngItem := rs.rangesByKey.ReplaceOrInsert(rng); exRngItem != nil {
			t.Fatalf("failed to insert range %s", rng)
		}
	}
	return rs
}
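rangesByKey here is a github.com/google/btree, which stores anything implementing its one-method Item interface; ReplaceOrInsert returns the displaced item, or nil when the key is new, which is why the test treats a non-nil return as a failure. A minimal self-contained sketch of that API, with an illustrative item type:

package main

import (
	"fmt"

	"github.com/google/btree"
)

// rangeItem orders items by end key, mirroring how a range set keyed on
// key bounds might implement btree.Item. The field name is illustrative.
type rangeItem struct{ endKey string }

func (r *rangeItem) Less(than btree.Item) bool {
	return r.endKey < than.(*rangeItem).endKey
}

func main() {
	tree := btree.New(64 /* degree */)
	for _, k := range []string{"003", "001", "002"} {
		if displaced := tree.ReplaceOrInsert(&rangeItem{endKey: k}); displaced != nil {
			fmt.Printf("displaced %v\n", displaced)
		}
	}
	// Ascend visits items in Less order: 001, 002, 003.
	tree.Ascend(func(i btree.Item) bool {
		fmt.Println(i.(*rangeItem).endKey)
		return true
	})
}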
Example 8: TestRaftAfterRemoveRange
// TestRaftAfterRemoveRange verifies that the MultiRaft state removes
// a remote node correctly after the Replica was removed from the Store.
func TestRaftAfterRemoveRange(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()
	// Make the split.
	splitArgs := adminSplitArgs(proto.KeyMin, []byte("b"), proto.RaftID(1), mtc.stores[0].StoreID())
	if _, err := mtc.stores[0].ExecuteCmd(context.Background(), &splitArgs); err != nil {
		t.Fatal(err)
	}
	raftID := proto.RaftID(2)
	mtc.replicateRange(raftID, 0, 1, 2)
	mtc.unreplicateRange(raftID, 0, 2)
	mtc.unreplicateRange(raftID, 0, 1)
	rng, err := mtc.stores[1].GetRange(raftID)
	if err != nil {
		t.Fatal(err)
	}
	// If the range removal happens before the range applies the replica config
	// change, the group will be re-created when MultiRaft receives a MsgApp.
	if err := util.IsTrueWithin(func() bool {
		return len(rng.Desc().Replicas) == 1
	}, 1*time.Second); err != nil {
		t.Fatal(err)
	}
	// Remove the range from the second Store.
	if err := mtc.stores[1].RemoveRange(rng); err != nil {
		t.Fatal(err)
	}
	if err := mtc.transport.Send(&multiraft.RaftMessageRequest{
		GroupID: proto.RaftID(0),
		Message: raftpb.Message{
			From: uint64(mtc.stores[2].RaftNodeID()),
			To:   uint64(mtc.stores[1].RaftNodeID()),
			Type: raftpb.MsgHeartbeat,
		}}); err != nil {
		t.Fatal(err)
	}
	// Execute another replica change to ensure that MultiRaft has processed
	// the heartbeat just sent.
	mtc.replicateRange(proto.RaftID(1), 0, 1)
}
Example 9: TestLocalSenderLookupReplica
func TestLocalSenderLookupReplica(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	defer stopper.Stop()
	ctx := storage.TestStoreContext
	manualClock := hlc.NewManualClock(0)
	ctx.Clock = hlc.NewClock(manualClock.UnixNano)
	ls := NewLocalSender()
	// Create two new stores with ranges we care about.
	var e [2]engine.Engine
	var s [2]*storage.Store
	ranges := []struct {
		storeID    proto.StoreID
		start, end proto.Key
	}{
		{2, proto.Key("a"), proto.Key("c")},
		{3, proto.Key("x"), proto.Key("z")},
	}
	for i, rng := range ranges {
		e[i] = engine.NewInMem(proto.Attributes{}, 1<<20)
		ctx.Transport = multiraft.NewLocalRPCTransport(stopper)
		defer ctx.Transport.Close()
		s[i] = storage.NewStore(ctx, e[i], &proto.NodeDescriptor{NodeID: 1})
		s[i].Ident.StoreID = rng.storeID
		desc := &proto.RangeDescriptor{
			RaftID:   proto.RaftID(i),
			StartKey: rng.start,
			EndKey:   rng.end,
			Replicas: []proto.Replica{{StoreID: rng.storeID}},
		}
		newRng, err := storage.NewRange(desc, s[i])
		if err != nil {
			t.Fatal(err)
		}
		if err := s[i].AddRangeTest(newRng); err != nil {
			t.Error(err)
		}
		ls.AddStore(s[i])
	}
	if _, r, err := ls.lookupReplica(proto.Key("a"), proto.Key("c")); r.StoreID != s[0].Ident.StoreID || err != nil {
		t.Errorf("expected store %d; got %d: %v", s[0].Ident.StoreID, r.StoreID, err)
	}
	if _, r, err := ls.lookupReplica(proto.Key("b"), nil); r.StoreID != s[0].Ident.StoreID || err != nil {
		t.Errorf("expected store %d; got %d: %v", s[0].Ident.StoreID, r.StoreID, err)
	}
	if _, r, err := ls.lookupReplica(proto.Key("b"), proto.Key("d")); r != nil || err == nil {
		t.Errorf("expected nil replica and non-nil error; got %v, %v", r, err)
	}
	if _, r, err := ls.lookupReplica(proto.Key("x"), proto.Key("z")); r.StoreID != s[1].Ident.StoreID {
		t.Errorf("expected store %d; got %d: %v", s[1].Ident.StoreID, r.StoreID, err)
	}
	if _, r, err := ls.lookupReplica(proto.Key("y"), nil); r.StoreID != s[1].Ident.StoreID || err != nil {
		t.Errorf("expected store %d; got %d: %v", s[1].Ident.StoreID, r.StoreID, err)
	}
}
Example 10: DecodeRaftStateKey
// DecodeRaftStateKey extracts the Raft ID from a RaftStateKey.
func DecodeRaftStateKey(key proto.Key) proto.RaftID {
	if !bytes.HasPrefix(key, LocalRangeIDPrefix) {
		panic(fmt.Sprintf("key %q does not have %q prefix", key, LocalRangeIDPrefix))
	}
	// Cut the prefix, then decode the Raft ID.
	b := key[len(LocalRangeIDPrefix):]
	_, raftID := encoding.DecodeUvarint(b)
	return proto.RaftID(raftID)
}
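The decoder's shape generalizes: verify a key prefix, slice it off, read a varint. A self-contained sketch of the same pattern, using the standard library's binary.Uvarint as a stand-in for cockroach's encoding.DecodeUvarint (the two varint formats differ, and the prefix below is made up):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// localRangeIDPrefix is an illustrative stand-in for LocalRangeIDPrefix.
var localRangeIDPrefix = []byte("\x00\x00\x00i")

// decodeRaftStateKey mirrors DecodeRaftStateKey's structure: check the
// prefix, cut it, decode the varint-encoded ID that follows.
func decodeRaftStateKey(key []byte) uint64 {
	if !bytes.HasPrefix(key, localRangeIDPrefix) {
		panic(fmt.Sprintf("key %q does not have %q prefix", key, localRangeIDPrefix))
	}
	b := key[len(localRangeIDPrefix):]
	raftID, n := binary.Uvarint(b)
	if n <= 0 {
		panic("malformed raft ID")
	}
	return raftID
}

func main() {
	key := append([]byte{}, localRangeIDPrefix...)
	key = binary.AppendUvarint(key, 42)
	fmt.Println(decodeRaftStateKey(key)) // 42
}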
Example 11: TestMembershipChange
func TestMembershipChange(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	cluster := newTestCluster(nil, 4, stopper, t)
	defer stopper.Stop()
	// Create a group with a single member, cluster.nodes[0].
	groupID := proto.RaftID(1)
	cluster.createGroup(groupID, 0, 1)
	// An automatic election is triggered since this is a single-node Raft group,
	// so we don't need to call triggerElection.
	// Consume and apply the membership change events.
	for i := 0; i < 4; i++ {
		go func(i int) {
			for {
				e, ok := <-cluster.events[i].MembershipChangeCommitted
				if !ok {
					return
				}
				e.Callback(nil)
			}
		}(i)
	}
	// Add each of the other three nodes to the cluster.
	for i := 1; i < 4; i++ {
		ch := cluster.nodes[0].ChangeGroupMembership(groupID, makeCommandID(),
			raftpb.ConfChangeAddNode,
			cluster.nodes[i].nodeID, nil)
		<-ch
	}
	// TODO(bdarnell): verify that the channel events are sent out correctly.
	/*
		for i := 0; i < 10; i++ {
			log.Infof("tick %d", i)
			cluster.tickers[0].Tick()
			time.Sleep(5 * time.Millisecond)
		}
		// Each node is notified of each other node's joining.
		for i := 0; i < 4; i++ {
			for j := 1; j < 4; j++ {
				select {
				case e := <-cluster.events[i].MembershipChangeCommitted:
					if e.NodeID != cluster.nodes[j].nodeID {
						t.Errorf("node %d expected event for %d, got %d", i, j, e.NodeID)
					}
				default:
					t.Errorf("node %d did not get expected event for %d", i, j)
				}
			}
		}
	*/
}
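ChangeGroupMembership presumably wraps an etcd raftpb.ConfChange proposal. For reference, building one directly looks roughly like this; the node that would receive it via ProposeConfChange is elided:

package main

import (
	"fmt"

	"github.com/coreos/etcd/raft/raftpb"
)

func main() {
	// A membership change is proposed to raft as a marshaled ConfChange;
	// ChangeGroupMembership in the test above presumably builds one like this.
	cc := raftpb.ConfChange{
		Type:   raftpb.ConfChangeAddNode,
		NodeID: 4,
	}
	data, err := cc.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Printf("proposing %d-byte conf change\n", len(data))
	// A live node would then call: n.ProposeConfChange(ctx, cc)
}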
Example 12: TestLeaderElectionEvent
func TestLeaderElectionEvent(t *testing.T) {
	defer leaktest.AfterTest(t)
	// Leader election events are fired when the leader commits an entry, not
	// when it issues a call for votes.
	stopper := stop.NewStopper()
	cluster := newTestCluster(nil, 3, stopper, t)
	defer stopper.Stop()
	groupID := proto.RaftID(1)
	cluster.createGroup(groupID, 0, 3)
	// Process a Ready with a new leader but no new commits.
	// This happens while an election is in progress.
	cluster.nodes[1].maybeSendLeaderEvent(groupID, cluster.nodes[1].groups[groupID],
		&raft.Ready{
			SoftState: &raft.SoftState{
				Lead: 3,
			},
		})
	// No events are sent.
	select {
	case e := <-cluster.events[1].LeaderElection:
		t.Fatalf("got unexpected event %v", e)
	case <-time.After(time.Millisecond):
	}
	// Now there are new committed entries. A new leader always commits an entry
	// to conclude the election.
	entry := raftpb.Entry{
		Index: 42,
		Term:  42,
	}
	cluster.nodes[1].maybeSendLeaderEvent(groupID, cluster.nodes[1].groups[groupID],
		&raft.Ready{
			Entries:          []raftpb.Entry{entry},
			CommittedEntries: []raftpb.Entry{entry},
		})
	// Now we get an event.
	select {
	case e := <-cluster.events[1].LeaderElection:
		if !reflect.DeepEqual(e, &EventLeaderElection{
			GroupID: groupID,
			NodeID:  3,
			Term:    42,
		}) {
			t.Errorf("election event did not match expectations: %+v", e)
		}
	case <-time.After(time.Millisecond):
		t.Fatal("didn't get expected event")
	}
}
Example 13: TestReplicateAfterSplit
// TestReplicateAfterSplit verifies that a new replica whose start key
// is not KeyMin can correctly apply snapshots when replicated to a fresh store.
func TestReplicateAfterSplit(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()
	raftID := proto.RaftID(1)
	splitKey := proto.Key("m")
	key := proto.Key("z")
	store0 := mtc.stores[0]
	// Make the split.
	splitArgs := adminSplitArgs(proto.KeyMin, splitKey, raftID, store0.StoreID())
	if _, err := store0.ExecuteCmd(context.Background(), &splitArgs); err != nil {
		t.Fatal(err)
	}
	raftID2 := store0.LookupRange(key, nil).Desc().RaftID
	if raftID2 == raftID {
		t.Errorf("got same raft ID after split")
	}
	// Issue an increment for a later check.
	incArgs := incrementArgs(key, 11, raftID2, store0.StoreID())
	if _, err := store0.ExecuteCmd(context.Background(), &incArgs); err != nil {
		t.Fatal(err)
	}
	// Now add the second replica.
	mtc.replicateRange(raftID2, 0, 1)
	if mtc.stores[1].LookupRange(key, nil).GetMaxBytes() == 0 {
		t.Error("range MaxBytes is not set after snapshot applied")
	}
	// Once it catches up, the effects of the increment command can be seen.
	if err := util.IsTrueWithin(func() bool {
		getArgs := getArgs(key, raftID2, mtc.stores[1].StoreID())
		// A read on a non-leader replica should use an inconsistent read.
		getArgs.ReadConsistency = proto.INCONSISTENT
		reply, err := mtc.stores[1].ExecuteCmd(context.Background(), &getArgs)
		if err != nil {
			return false
		}
		getResp := reply.(*proto.GetResponse)
		if log.V(1) {
			log.Infof("read value %d", mustGetInteger(getResp.Value))
		}
		return mustGetInteger(getResp.Value) == 11
	}, 1*time.Second); err != nil {
		t.Fatal(err)
	}
}
Example 14: TestInitialLeaderElection
func TestInitialLeaderElection(t *testing.T) {
	defer leaktest.AfterTest(t)
	// Run the test three times, each time triggering a different node's
	// election clock. The node that requests an election first should win.
	for leaderIndex := 0; leaderIndex < 3; leaderIndex++ {
		log.Infof("testing leader election for node %v", leaderIndex)
		stopper := stop.NewStopper()
		cluster := newTestCluster(nil, 3, stopper, t)
		groupID := proto.RaftID(1)
		cluster.createGroup(groupID, 0, 3)
		cluster.elect(leaderIndex, groupID)
		stopper.Stop()
	}
}
Example 15: TestProgressWithDownNode
// TestProgressWithDownNode verifies that a surviving quorum can make progress
// with a downed node.
func TestProgressWithDownNode(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()
	raftID := proto.RaftID(1)
	mtc.replicateRange(raftID, 0, 1, 2)
	incArgs, incResp := incrementArgs([]byte("a"), 5, raftID, mtc.stores[0].StoreID())
	if err := mtc.stores[0].ExecuteCmd(context.Background(), proto.Call{Args: incArgs, Reply: incResp}); err != nil {
		t.Fatal(err)
	}
	// Verify that the first increment propagates to all the engines.
	verify := func(expected []int64) {
		util.SucceedsWithin(t, time.Second, func() error {
			values := []int64{}
			for _, eng := range mtc.engines {
				val, _, err := engine.MVCCGet(eng, proto.Key("a"), mtc.clock.Now(), true, nil)
				if err != nil {
					return err
				}
				values = append(values, mustGetInteger(val))
			}
			if !reflect.DeepEqual(expected, values) {
				return util.Errorf("expected %v, got %v", expected, values)
			}
			return nil
		})
	}
	verify([]int64{5, 5, 5})
	// Stop one of the replicas and issue a new increment.
	mtc.stopStore(1)
	incArgs, incResp = incrementArgs([]byte("a"), 11, raftID, mtc.stores[0].StoreID())
	if err := mtc.stores[0].ExecuteCmd(context.Background(), proto.Call{Args: incArgs, Reply: incResp}); err != nil {
		t.Fatal(err)
	}
	// The new increment can be seen on both live replicas.
	verify([]int64{16, 5, 16})
	// Once the downed node is restarted, it will catch up.
	mtc.restartStore(1)
	verify([]int64{16, 16, 16})
}
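util.SucceedsWithin, used by verify above, is a retry-until-deadline test helper. A minimal guess at its semantics, for readers unfamiliar with the idiom; the real helper's retry and backoff policy may differ:

package sketch

import (
	"testing"
	"time"
)

// succeedsWithin retries fn until it returns nil or the duration elapses,
// then fails the test with the last error.
func succeedsWithin(t *testing.T, d time.Duration, fn func() error) {
	t.Helper()
	deadline := time.Now().Add(d)
	var err error
	for time.Now().Before(deadline) {
		if err = fn(); err == nil {
			return
		}
		time.Sleep(10 * time.Millisecond) // assumed retry interval
	}
	t.Fatalf("condition failed to hold within %s: %v", d, err)
}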