This page collects typical usage examples of the Golang RangeID function from github.com/cockroachdb/cockroach/proto. If you are wondering what exactly Golang's RangeID does, how to use it, or how it looks in real code, the curated examples below may help.
Fifteen RangeID code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Golang code examples.
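Before the examples, a quick note on what proto.RangeID actually is: in CockroachDB sources of this vintage it is a named integer type (assumed here to be int64) identifying a range, so "calling" proto.RangeID is simply a Go type conversion. Below is a minimal, self-contained sketch of that idea; the local RangeID type and the sample values are illustrative stand-ins, not taken from the cockroach codebase.

package main

import (
    "fmt"
    "sort"
)

// RangeID mirrors proto.RangeID, assumed here to be a named int64 type.
// A named type keeps range IDs from being mixed up with other integers.
type RangeID int64

func main() {
    // Plain type conversions, exactly the shape of proto.RangeID(1) below.
    id := RangeID(1)
    fromRaft := RangeID(uint64(42)) // e.g. converting a raft group ID

    // Named integer IDs work as map keys and compare like integers.
    ranges := map[RangeID]string{id: "first", fromRaft: "second"}
    ids := make([]int, 0, len(ranges))
    for rid := range ranges {
        ids = append(ids, int(rid))
    }
    sort.Ints(ids)
    fmt.Println(ids, len(ranges)) // [1 42] 2
}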
Example 1: TestHeartbeatResponseFanout
// TestHeartbeatResponseFanout checks two raft groups with the same node
// distribution but different terms: a heartbeat response from one group
// must not disturb the other group's term or leadership.
func TestHeartbeatResponseFanout(t *testing.T) {
    defer leaktest.AfterTest(t)
    stopper := stop.NewStopper()
    defer stopper.Stop()
    cluster := newTestCluster(nil, 3, stopper, t)
    groupID1 := proto.RangeID(1)
    cluster.createGroup(groupID1, 0, 3 /* replicas */)
    groupID2 := proto.RangeID(2)
    cluster.createGroup(groupID2, 0, 3 /* replicas */)
    leaderIndex := 0
    cluster.elect(leaderIndex, groupID1)
    // groupID2 goes through three rounds of elections, so it ends up with a
    // different term than groupID1, but both leaders sit on the same node.
    for i := 2; i >= 0; i-- {
        leaderIndex = i
        cluster.elect(leaderIndex, groupID2)
    }
    // Send a coalesced heartbeat. The heartbeat response from groupID2
    // carries a larger term than the one from groupID1.
    cluster.nodes[0].coalescedHeartbeat()
    // Submit a command to check whether groupID1's leadership survived.
    cluster.nodes[0].SubmitCommand(groupID1, makeCommandID(), []byte("command"))
    select {
    case <-cluster.events[0].CommandCommitted:
        log.Infof("SubmitCommand succeeded after heartbeat response fanout")
    case <-time.After(500 * time.Millisecond):
        t.Fatalf("no leader after heartbeat response fanout")
    }
}
Example 2: sendAttempt
// sendAttempt gathers and rearranges the replicas, then makes an RPC call.
func (ds *DistSender) sendAttempt(trace *tracer.Trace, ba proto.BatchRequest, desc *proto.RangeDescriptor) (*proto.BatchResponse, error) {
    defer trace.Epoch("sending RPC")()
    leader := ds.leaderCache.Lookup(proto.RangeID(desc.RangeID))
    // Try to send the call.
    replicas := newReplicaSlice(ds.gossip, desc)
    // Rearrange the replicas so that replicas with a long common prefix of
    // attributes end up first. If there's no prefix, this is a no-op.
    order := ds.optimizeReplicaOrder(replicas)
    // If this request needs to go to a leader and we know who that is, move
    // it to the front.
    if !(proto.IsReadOnly(&ba) && ba.ReadConsistency == proto.INCONSISTENT) &&
        leader.StoreID > 0 {
        if i := replicas.FindReplica(leader.StoreID); i >= 0 {
            replicas.MoveToFront(i)
            order = rpc.OrderStable
        }
    }
    // TODO(tschottdorf) &ba -> ba
    resp, err := ds.sendRPC(trace, desc.RangeID, replicas, order, &ba)
    if err != nil {
        return nil, err
    }
    // Untangle the error from the received response.
    br := resp.(*proto.BatchResponse)
    err = br.GoError()
    br.Error = nil
    return br, err
}
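The pivotal step in sendAttempt is moving the known leader's replica to the front of the candidate list. The sketch below isolates that idea in self-contained form; replicaSlice, findReplica, and moveToFront are simplified stand-ins for the real storage types, not the actual implementation.

package main

import "fmt"

// replica is a simplified stand-in for proto.Replica.
type replica struct{ StoreID int }

// replicaSlice mimics the FindReplica/MoveToFront pair used by sendAttempt.
type replicaSlice []replica

// findReplica returns the index of the replica on the given store, or -1.
func (rs replicaSlice) findReplica(storeID int) int {
    for i := range rs {
        if rs[i].StoreID == storeID {
            return i
        }
    }
    return -1
}

// moveToFront rotates the element at index i to position 0, preserving the
// relative order of the replicas that were ahead of it.
func (rs replicaSlice) moveToFront(i int) {
    front := rs[i]
    copy(rs[1:i+1], rs[0:i])
    rs[0] = front
}

func main() {
    rs := replicaSlice{{StoreID: 2}, {StoreID: 5}, {StoreID: 9}}
    leaderStoreID := 9 // imagine the leader cache returned store 9
    if i := rs.findReplica(leaderStoreID); i >= 0 {
        rs.moveToFront(i)
    }
    fmt.Println(rs) // [{9} {2} {5}]
}

Once the leader is first, sendAttempt pins the order with rpc.OrderStable so the RPC layer does not shuffle it again.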
Example 3: newTestRangeSet
// newTestRangeSet creates a new range set containing count ranges.
func newTestRangeSet(count int, t *testing.T) *testRangeSet {
    rs := &testRangeSet{rangesByKey: btree.New(64 /* degree */)}
    for i := 0; i < count; i++ {
        desc := &proto.RangeDescriptor{
            RangeID:  proto.RangeID(i),
            StartKey: proto.Key(fmt.Sprintf("%03d", i)),
            EndKey:   proto.Key(fmt.Sprintf("%03d", i+1)),
        }
        // Initialize the range stats so the scanner can use them.
        rng := &Replica{
            stats: &rangeStats{
                raftID: desc.RangeID,
                MVCCStats: engine.MVCCStats{
                    KeyBytes:  1,
                    ValBytes:  2,
                    KeyCount:  1,
                    LiveCount: 1,
                },
            },
        }
        if err := rng.setDesc(desc); err != nil {
            t.Fatal(err)
        }
        if exRngItem := rs.rangesByKey.ReplaceOrInsert(rng); exRngItem != nil {
            t.Fatalf("failed to insert range %s", rng)
        }
    }
    return rs
}
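The range set above is backed by github.com/google/btree, whose pre-generics API orders items through a Less method. A minimal sketch of that usage pattern follows; keyedRange is a made-up stand-in for the Replica items the test inserts.

package main

import (
    "fmt"

    "github.com/google/btree"
)

// keyedRange is a made-up btree.Item ordered by start key, standing in for
// the Replica values the test stores in rangesByKey.
type keyedRange struct{ start, end string }

// Less orders items by start key, which is what lets a scanner walk
// ranges in key order.
func (k *keyedRange) Less(than btree.Item) bool {
    return k.start < than.(*keyedRange).start
}

func main() {
    tree := btree.New(64 /* degree */)
    for _, r := range []*keyedRange{{"003", "004"}, {"001", "002"}} {
        // ReplaceOrInsert returns the displaced item, or nil if the key
        // was not already present, matching the exRngItem check above.
        if prev := tree.ReplaceOrInsert(r); prev != nil {
            fmt.Println("replaced:", prev)
        }
    }
    tree.Ascend(func(i btree.Item) bool {
        fmt.Println(i.(*keyedRange).start) // 001, then 003
        return true
    })
}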
Example 4: handleWriteReady
// handleWriteReady converts a set of raft.Ready structs into a writeRequest
// to be persisted, marks each group as writing, and sends the request to
// the writeTask.
func (s *state) handleWriteReady(readyGroups map[uint64]raft.Ready) {
    if log.V(6) {
        log.Infof("node %v write ready, preparing request", s.nodeID)
    }
    writeRequest := newWriteRequest()
    for groupID, ready := range readyGroups {
        raftGroupID := proto.RangeID(groupID)
        g, ok := s.groups[raftGroupID]
        if !ok {
            if log.V(6) {
                log.Infof("dropping write request to group %d", groupID)
            }
            continue
        }
        g.writing = true
        gwr := &groupWriteRequest{}
        if !raft.IsEmptyHardState(ready.HardState) {
            gwr.state = ready.HardState
        }
        if !raft.IsEmptySnap(ready.Snapshot) {
            gwr.snapshot = ready.Snapshot
        }
        if len(ready.Entries) > 0 {
            gwr.entries = ready.Entries
        }
        writeRequest.groups[raftGroupID] = gwr
    }
    s.writeTask.in <- writeRequest
}
Example 5: TestLeaderElectionEvent
func TestLeaderElectionEvent(t *testing.T) {
    defer leaktest.AfterTest(t)
    // Leader election events are fired when the leader commits an entry, not
    // when it issues a call for votes.
    stopper := stop.NewStopper()
    cluster := newTestCluster(nil, 3, stopper, t)
    defer stopper.Stop()
    groupID := proto.RangeID(1)
    cluster.createGroup(groupID, 0, 3)
    // Process a Ready with a new leader but no new commits. This happens
    // while an election is in progress. It may be dirty, but it seems to be
    // the only way to make testrace pass.
    cluster.nodes[1].callbackChan <- func() {
        cluster.nodes[1].maybeSendLeaderEvent(groupID, cluster.nodes[1].groups[groupID],
            &raft.Ready{
                SoftState: &raft.SoftState{
                    Lead: 3,
                },
            })
    }
    // Trigger another round of multiraft's select loop.
    cluster.tickers[1].Tick()
    // No events are sent.
    select {
    case e := <-cluster.events[1].LeaderElection:
        t.Fatalf("got unexpected event %v", e)
    case <-time.After(200 * time.Millisecond):
    }
    // Now there are new committed entries. A new leader always commits an
    // entry to conclude the election.
    entry := raftpb.Entry{
        Index: 42,
        Term:  42,
    }
    // Again, this may be dirty, but it seems to be the only way to make
    // testrace pass.
    cluster.nodes[1].callbackChan <- func() {
        cluster.nodes[1].maybeSendLeaderEvent(groupID, cluster.nodes[1].groups[groupID],
            &raft.Ready{
                Entries:          []raftpb.Entry{entry},
                CommittedEntries: []raftpb.Entry{entry},
            })
    }
    cluster.tickers[1].Tick()
    // Now we get an event.
    select {
    case e := <-cluster.events[1].LeaderElection:
        if !reflect.DeepEqual(e, &EventLeaderElection{
            GroupID: groupID,
            NodeID:  3,
            Term:    42,
        }) {
            t.Errorf("election event did not match expectations: %+v", e)
        }
    case <-time.After(200 * time.Millisecond):
        t.Fatal("didn't get expected event")
    }
}
Example 6: sendAttempt
// sendAttempt is invoked by Send. It temporarily truncates the arguments to
// match the descriptor's EndKey (if necessary), then gathers and rearranges
// the replicas before making a single attempt at sending the request. It
// returns the result of sending the RPC; a potential error contained in the
// reply has to be handled separately by the caller.
func (ds *DistSender) sendAttempt(trace *tracer.Trace, args proto.Request, desc *proto.RangeDescriptor) (proto.Response, error) {
    defer trace.Epoch("sending RPC")()
    // Truncate the request to our current range, making sure not to
    // touch it unless we have to (it is illegal to send EndKey on
    // commands which do not operate on ranges).
    if endKey := args.Header().EndKey; endKey != nil && !endKey.Less(desc.EndKey) {
        defer func(k proto.Key) { args.Header().EndKey = k }(endKey)
        args.Header().EndKey = desc.EndKey
    }
    leader := ds.leaderCache.Lookup(proto.RangeID(desc.RangeID))
    // Try to send the call.
    replicas := newReplicaSlice(ds.gossip, desc)
    // Rearrange the replicas so that replicas with a long common prefix of
    // attributes end up first. If there's no prefix, this is a no-op.
    order := ds.optimizeReplicaOrder(replicas)
    // If this request needs to go to a leader and we know who that is, move
    // it to the front.
    if !(proto.IsRead(args) && args.Header().ReadConsistency == proto.INCONSISTENT) &&
        leader.StoreID > 0 {
        if i := replicas.FindReplica(leader.StoreID); i >= 0 {
            replicas.MoveToFront(i)
            order = rpc.OrderStable
        }
    }
    return ds.sendRPC(trace, desc.RangeID, replicas, order, args)
}
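One detail worth calling out in this version of sendAttempt is the defer func(k proto.Key) { ... }(endKey) line: it snapshots the original EndKey as an argument at defer time and restores it when the function returns. Here is a self-contained sketch of that save-and-restore idiom; the header type and clampEndKey helper are illustrative only, not cockroach types.

package main

import "fmt"

// header is an illustrative stand-in for a request header with an EndKey.
type header struct{ EndKey string }

// clampEndKey temporarily truncates h.EndKey to limit while work runs,
// restoring the original value afterwards via a deferred closure that
// captured the old key as an argument.
func clampEndKey(h *header, limit string, work func()) {
    if h.EndKey > limit {
        defer func(k string) { h.EndKey = k }(h.EndKey) // old value captured now
        h.EndKey = limit
    }
    work()
}

func main() {
    h := &header{EndKey: "z"}
    clampEndKey(h, "m", func() { fmt.Println("during:", h.EndKey) }) // during: m
    fmt.Println("after:", h.EndKey)                                  // after: z
}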
Example 7: TestLocalSenderLookupReplica
func TestLocalSenderLookupReplica(t *testing.T) {
    defer leaktest.AfterTest(t)
    stopper := stop.NewStopper()
    defer stopper.Stop()
    ctx := storage.TestStoreContext
    manualClock := hlc.NewManualClock(0)
    ctx.Clock = hlc.NewClock(manualClock.UnixNano)
    ls := NewLocalSender()
    // Create two new stores with ranges we care about.
    var e [2]engine.Engine
    var s [2]*storage.Store
    ranges := []struct {
        storeID    proto.StoreID
        start, end proto.Key
    }{
        {2, proto.Key("a"), proto.Key("c")},
        {3, proto.Key("x"), proto.Key("z")},
    }
    for i, rng := range ranges {
        e[i] = engine.NewInMem(proto.Attributes{}, 1<<20)
        ctx.Transport = multiraft.NewLocalRPCTransport(stopper)
        defer ctx.Transport.Close()
        s[i] = storage.NewStore(ctx, e[i], &proto.NodeDescriptor{NodeID: 1})
        s[i].Ident.StoreID = rng.storeID
        desc := &proto.RangeDescriptor{
            RangeID:  proto.RangeID(i),
            StartKey: rng.start,
            EndKey:   rng.end,
            Replicas: []proto.Replica{{StoreID: rng.storeID}},
        }
        newRng, err := storage.NewReplica(desc, s[i])
        if err != nil {
            t.Fatal(err)
        }
        if err := s[i].AddRangeTest(newRng); err != nil {
            t.Error(err)
        }
        ls.AddStore(s[i])
    }
    if _, r, err := ls.lookupReplica(proto.Key("a"), proto.Key("c")); r.StoreID != s[0].Ident.StoreID || err != nil {
        t.Errorf("expected store %d; got %d: %v", s[0].Ident.StoreID, r.StoreID, err)
    }
    if _, r, err := ls.lookupReplica(proto.Key("b"), nil); r.StoreID != s[0].Ident.StoreID || err != nil {
        t.Errorf("expected store %d; got %d: %v", s[0].Ident.StoreID, r.StoreID, err)
    }
    if _, r, err := ls.lookupReplica(proto.Key("b"), proto.Key("d")); r != nil || err == nil {
        t.Errorf("expected no replica and an error; got %d", r.StoreID)
    }
    if _, r, err := ls.lookupReplica(proto.Key("x"), proto.Key("z")); r.StoreID != s[1].Ident.StoreID {
        t.Errorf("expected store %d; got %d: %v", s[1].Ident.StoreID, r.StoreID, err)
    }
    if _, r, err := ls.lookupReplica(proto.Key("y"), nil); r.StoreID != s[1].Ident.StoreID || err != nil {
        t.Errorf("expected store %d; got %d: %v", s[1].Ident.StoreID, r.StoreID, err)
    }
}
Example 8: DecodeRaftStateKey
// DecodeRaftStateKey extracts the Range ID from a RaftStateKey.
func DecodeRaftStateKey(key proto.Key) proto.RangeID {
    if !bytes.HasPrefix(key, LocalRangeIDPrefix) {
        panic(fmt.Sprintf("key %q does not have %q prefix", key, LocalRangeIDPrefix))
    }
    // Strip the prefix and decode the Range ID.
    b := key[len(LocalRangeIDPrefix):]
    _, rangeID := encoding.DecodeUvarint(b)
    return proto.RangeID(rangeID)
}
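DecodeRaftStateKey follows a common keyspace convention: a fixed prefix followed by a varint-encoded ID. The sketch below reproduces that strip-prefix-then-decode shape using only the standard library; the prefix value and helper names are invented for illustration, and the real cockroach encoding package differs in its varint details.

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

// localRangeIDPrefix is an invented prefix for illustration only.
var localRangeIDPrefix = []byte("\x00range-id-")

// encodeRangeIDKey appends a varint-encoded range ID to the prefix.
func encodeRangeIDKey(rangeID uint64) []byte {
    key := append([]byte(nil), localRangeIDPrefix...)
    return binary.AppendUvarint(key, rangeID)
}

// decodeRangeIDKey checks the prefix and decodes the trailing varint,
// mirroring the panic-on-bad-prefix contract of DecodeRaftStateKey.
func decodeRangeIDKey(key []byte) uint64 {
    if !bytes.HasPrefix(key, localRangeIDPrefix) {
        panic(fmt.Sprintf("key %q does not have %q prefix", key, localRangeIDPrefix))
    }
    rangeID, n := binary.Uvarint(key[len(localRangeIDPrefix):])
    if n <= 0 {
        panic("malformed range ID varint")
    }
    return rangeID
}

func main() {
    key := encodeRangeIDKey(42)
    fmt.Println(decodeRangeIDKey(key)) // 42
}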
Example 9: TestSlowStorage
func TestSlowStorage(t *testing.T) {
    defer leaktest.AfterTest(t)
    stopper := stop.NewStopper()
    cluster := newTestCluster(nil, 3, stopper, t)
    defer stopper.Stop()
    groupID := proto.RangeID(1)
    cluster.createGroup(groupID, 0, 3)
    cluster.triggerElection(0, groupID)
    // Block the storage on the last node.
    cluster.storages[2].Block()
    // Submit a command to the leader.
    cluster.nodes[0].SubmitCommand(groupID, makeCommandID(), []byte("command"))
    // Even with the third node blocked, the other nodes can make progress.
    for i := 0; i < 2; i++ {
        events := cluster.events[i]
        log.Infof("waiting for event to be committed on node %v", i)
        commit := <-events.CommandCommitted
        if string(commit.Command) != "command" {
            t.Errorf("unexpected value in committed command: %v", commit.Command)
        }
    }
    // Ensure that node 2 is in fact blocked.
    time.Sleep(time.Millisecond)
    select {
    case commit := <-cluster.events[2].CommandCommitted:
        t.Errorf("didn't expect commits on node 2 but got %v", commit)
    default:
    }
    // After unblocking the third node, it will catch up.
    cluster.storages[2].Unblock()
    log.Infof("waiting for event to be committed on node 2")
    // When we unblock, the backlog is not guaranteed to be processed in order,
    // and in some cases the leader may need to retransmit some messages.
    for i := 0; i < 3; i++ {
        select {
        case commit := <-cluster.events[2].CommandCommitted:
            if string(commit.Command) != "command" {
                t.Errorf("unexpected value in committed command: %v", commit.Command)
            }
            return
        case <-time.After(5 * time.Millisecond):
            // Tick both nodes' clocks. The ticks on the follower node don't
            // really do anything, but they do ensure that that goroutine is
            // getting scheduled (and the real-time delay allows RPC responses
            // to pass between the nodes).
            cluster.tickers[0].Tick()
            cluster.tickers[2].Tick()
        }
    }
}
Example 10: TestMembershipChange
func TestMembershipChange(t *testing.T) {
    defer leaktest.AfterTest(t)
    stopper := stop.NewStopper()
    cluster := newTestCluster(nil, 4, stopper, t)
    defer stopper.Stop()
    // Create a group with a single member, cluster.nodes[0].
    groupID := proto.RangeID(1)
    cluster.createGroup(groupID, 0, 1)
    // An automatic election is triggered since this is a single-node Raft
    // group, so we don't need to call triggerElection.
    // Consume and apply the membership change events.
    for i := 0; i < 4; i++ {
        go func(i int) {
            for {
                e, ok := <-cluster.events[i].MembershipChangeCommitted
                if !ok {
                    return
                }
                e.Callback(nil)
            }
        }(i)
    }
    // Add each of the other three nodes to the cluster.
    for i := 1; i < 4; i++ {
        ch := cluster.nodes[0].ChangeGroupMembership(groupID, makeCommandID(),
            raftpb.ConfChangeAddNode,
            cluster.nodes[i].nodeID, nil)
        <-ch
    }
    // TODO(bdarnell): verify that the channel events are sent out correctly.
    /*
        for i := 0; i < 10; i++ {
            log.Infof("tick %d", i)
            cluster.tickers[0].Tick()
            time.Sleep(5 * time.Millisecond)
        }
        // Each node is notified of each other node's joining.
        for i := 0; i < 4; i++ {
            for j := 1; j < 4; j++ {
                select {
                case e := <-cluster.events[i].MembershipChangeCommitted:
                    if e.NodeID != cluster.nodes[j].nodeID {
                        t.Errorf("node %d expected event for %d, got %d", i, j, e.NodeID)
                    }
                default:
                    t.Errorf("node %d did not get expected event for %d", i, j)
                }
            }
        }
    */
}
Example 11: addRange
// addRange adds a new range to the cluster but does not attach it to any
// store.
func (c *Cluster) addRange() *Range {
    rangeID := proto.RangeID(len(c.ranges))
    newRng := newRange(rangeID, c.allocator)
    c.ranges[rangeID] = newRng
    // Save a sorted array of range IDs to avoid having to calculate them
    // multiple times.
    c.rangeIDs = append(c.rangeIDs, rangeID)
    sort.Sort(c.rangeIDs)
    return newRng
}
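A small design note on addRange: it keeps c.rangeIDs sorted by re-sorting after every append, which costs O(n log n) per insertion. Because the slice is already sorted before the append, a binary-search insertion suffices; below is a sketch under the assumption that range IDs are a named int64 type. (In this particular caller the new ID is always the largest yet, so a plain append would also preserve order.)

package main

import (
    "fmt"
    "sort"
)

type RangeID int64 // stand-in for proto.RangeID

// insertSorted places id into ids, which must already be sorted, using a
// binary search instead of appending and re-sorting the whole slice.
func insertSorted(ids []RangeID, id RangeID) []RangeID {
    i := sort.Search(len(ids), func(j int) bool { return ids[j] >= id })
    ids = append(ids, 0)     // grow by one element
    copy(ids[i+1:], ids[i:]) // shift the tail right
    ids[i] = id
    return ids
}

func main() {
    ids := []RangeID{1, 3, 4}
    ids = insertSorted(ids, 2)
    fmt.Println(ids) // [1 2 3 4]
}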
Example 12: TestRaftAfterRemoveRange
// TestRaftAfterRemoveRange verifies that the MultiRaft state removes
// a remote node correctly after the Replica was removed from the Store.
func TestRaftAfterRemoveRange(t *testing.T) {
    defer leaktest.AfterTest(t)
    mtc := startMultiTestContext(t, 3)
    defer mtc.Stop()
    // Make the split.
    splitArgs := adminSplitArgs(proto.KeyMin, []byte("b"), proto.RangeID(1), mtc.stores[0].StoreID())
    if _, err := mtc.stores[0].ExecuteCmd(context.Background(), &splitArgs); err != nil {
        t.Fatal(err)
    }
    rangeID := proto.RangeID(2)
    mtc.replicateRange(rangeID, 0, 1, 2)
    mtc.unreplicateRange(rangeID, 0, 2)
    mtc.unreplicateRange(rangeID, 0, 1)
    // Wait for the removal to be processed.
    util.SucceedsWithin(t, time.Second, func() error {
        _, err := mtc.stores[1].GetReplica(rangeID)
        if _, ok := err.(*proto.RangeNotFoundError); ok {
            return nil
        } else if err != nil {
            return err
        }
        return util.Errorf("range still exists")
    })
    if err := mtc.transport.Send(&multiraft.RaftMessageRequest{
        GroupID: proto.RangeID(0),
        Message: raftpb.Message{
            From: uint64(mtc.stores[2].RaftNodeID()),
            To:   uint64(mtc.stores[1].RaftNodeID()),
            Type: raftpb.MsgHeartbeat,
        }}); err != nil {
        t.Fatal(err)
    }
    // Execute another replica change to ensure that MultiRaft has processed
    // the heartbeat just sent.
    mtc.replicateRange(proto.RangeID(1), 0, 1)
}
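The util.SucceedsWithin call above is a poll-until-success helper: it keeps retrying the closure until it returns nil or the deadline expires. Here is a simplified, self-contained stand-in for that helper; the real util package's retry cadence and failure reporting differ.

package main

import (
    "errors"
    "fmt"
    "time"
)

// succeedsWithin polls fn until it returns nil or the duration elapses,
// returning the last error on timeout. It is a simplified stand-in for
// util.SucceedsWithin as used in the test above.
func succeedsWithin(d time.Duration, fn func() error) error {
    deadline := time.Now().Add(d)
    for {
        err := fn()
        if err == nil {
            return nil
        }
        if time.Now().After(deadline) {
            return err
        }
        time.Sleep(d / 100) // back off briefly between attempts
    }
}

func main() {
    start := time.Now()
    err := succeedsWithin(time.Second, func() error {
        if time.Since(start) < 50*time.Millisecond {
            return errors.New("range still exists")
        }
        return nil
    })
    fmt.Println(err) // <nil>
}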
Example 13: TestRaftRemoveRace
// TestRaftRemoveRace adds and removes a replica repeatedly in an
// attempt to reproduce a race
// (https://github.com/cockroachdb/cockroach/issues/1911). Note that
// 10 repetitions is not enough to reliably reproduce the problem, but
// it's better than any other test we have for this (increasing the
// number of repetitions adds an unacceptable amount of test runtime).
func TestRaftRemoveRace(t *testing.T) {
    defer leaktest.AfterTest(t)
    mtc := startMultiTestContext(t, 3)
    defer mtc.Stop()
    rangeID := proto.RangeID(1)
    mtc.replicateRange(rangeID, 0, 1, 2)
    for i := 0; i < 10; i++ {
        mtc.unreplicateRange(rangeID, 0, 2)
        mtc.replicateRange(rangeID, 0, 2)
    }
}
Example 14: String
// String prints out the current status of the cluster.
func (c *Cluster) String() string {
    storesRangeCounts := make(map[proto.StoreID]int)
    for _, r := range c.ranges {
        for _, storeID := range r.getStoreIDs() {
            storesRangeCounts[storeID]++
        }
    }
    var nodeIDs []int
    for nodeID := range c.nodes {
        nodeIDs = append(nodeIDs, int(nodeID))
    }
    sort.Ints(nodeIDs)
    var buf bytes.Buffer
    buf.WriteString("Node Info:\n")
    for _, nodeID := range nodeIDs {
        n := c.nodes[proto.NodeID(nodeID)]
        buf.WriteString(n.String())
        buf.WriteString("\n")
    }
    var storeIDs []int
    for storeID := range c.stores {
        storeIDs = append(storeIDs, int(storeID))
    }
    sort.Ints(storeIDs)
    buf.WriteString("Store Info:\n")
    for _, storeID := range storeIDs {
        s := c.stores[proto.StoreID(storeID)]
        buf.WriteString(s.String(storesRangeCounts[proto.StoreID(storeID)]))
        buf.WriteString("\n")
    }
    var rangeIDs []int
    for rangeID := range c.ranges {
        rangeIDs = append(rangeIDs, int(rangeID))
    }
    sort.Ints(rangeIDs)
    buf.WriteString("Range Info:\n")
    for _, rangeID := range rangeIDs {
        r := c.ranges[proto.RangeID(rangeID)]
        buf.WriteString(r.String())
        buf.WriteString("\n")
    }
    return buf.String()
}
Example 15: TestReplicateAfterSplit
// TestReplicateAfterSplit verifies that a new replica whose start key
// is not KeyMin, replicating to a fresh store, can apply snapshots correctly.
func TestReplicateAfterSplit(t *testing.T) {
    defer leaktest.AfterTest(t)
    mtc := startMultiTestContext(t, 2)
    defer mtc.Stop()
    rangeID := proto.RangeID(1)
    splitKey := proto.Key("m")
    key := proto.Key("z")
    store0 := mtc.stores[0]
    // Make the split.
    splitArgs := adminSplitArgs(proto.KeyMin, splitKey, rangeID, store0.StoreID())
    if _, err := store0.ExecuteCmd(context.Background(), &splitArgs); err != nil {
        t.Fatal(err)
    }
    rangeID2 := store0.LookupReplica(key, nil).Desc().RangeID
    if rangeID2 == rangeID {
        t.Errorf("got same range ID after split")
    }
    // Issue an increment for a later check.
    incArgs := incrementArgs(key, 11, rangeID2, store0.StoreID())
    if _, err := store0.ExecuteCmd(context.Background(), &incArgs); err != nil {
        t.Fatal(err)
    }
    // Now add the second replica.
    mtc.replicateRange(rangeID2, 0, 1)
    if mtc.stores[1].LookupReplica(key, nil).GetMaxBytes() == 0 {
        t.Error("range MaxBytes is not set after snapshot applied")
    }
    // Once it catches up, the effects of the increment commands can be seen.
    if err := util.IsTrueWithin(func() bool {
        getArgs := getArgs(key, rangeID2, mtc.stores[1].StoreID())
        // Reads on a non-leader replica must use an inconsistent read.
        getArgs.ReadConsistency = proto.INCONSISTENT
        reply, err := mtc.stores[1].ExecuteCmd(context.Background(), &getArgs)
        if err != nil {
            return false
        }
        getResp := reply.(*proto.GetResponse)
        if log.V(1) {
            log.Infof("read value %d", mustGetInt(getResp.Value))
        }
        return mustGetInt(getResp.Value) == 11
    }, 1*time.Second); err != nil {
        t.Fatal(err)
    }
}