本文整理匯總了Golang中github.com/cockroachdb/cockroach/keys.RangeDescriptorKey函數的典型用法代碼示例。如果您正苦於以下問題:Golang RangeDescriptorKey函數的具體用法?Golang RangeDescriptorKey怎麽用?Golang RangeDescriptorKey使用的例子?那麽, 這裏精選的函數代碼示例或許可以為您提供幫助。
在下文中一共展示了RangeDescriptorKey函數的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Golang代碼示例。
示例1: countRangeReplicas
// countRangeReplicas looks up the descriptor of the range beginning at
// KeyMin (the first range) and reports how many replicas it lists.
func countRangeReplicas(db *client.DB) (int, error) {
	var desc roachpb.RangeDescriptor
	if err := db.GetProto(keys.RangeDescriptorKey(roachpb.KeyMin), &desc); err != nil {
		return 0, err
	}
	return len(desc.Replicas), nil
}
示例2: Snapshot
// Snapshot implements the raft.Storage interface.
// It captures a point-in-time copy of the replica's entire key range
// (including range-local data such as the response cache), together with
// the applied index, term and raft ConfState that raft needs in order to
// ship the snapshot to a follower.
func (r *Replica) Snapshot() (raftpb.Snapshot, error) {
	// Copy all the data from a consistent RocksDB snapshot into a RaftSnapshotData.
	snap := r.rm.NewSnapshot()
	defer snap.Close()
	var snapData proto.RaftSnapshotData
	// Read the range metadata from the snapshot instead of the members
	// of the Range struct because they might be changed concurrently.
	appliedIndex, err := r.loadAppliedIndex(snap)
	if err != nil {
		return raftpb.Snapshot{}, err
	}
	var desc proto.RangeDescriptor
	// We ignore intents on the range descriptor (consistent=false) because we
	// know they cannot be committed yet; operations that modify range
	// descriptors resolve their own intents when they commit.
	ok, err := engine.MVCCGetProto(snap, keys.RangeDescriptorKey(r.Desc().StartKey),
		r.rm.Clock().Now(), false /* !consistent */, nil, &desc)
	if err != nil {
		return raftpb.Snapshot{}, util.Errorf("failed to get desc: %s", err)
	}
	if !ok {
		return raftpb.Snapshot{}, util.Errorf("couldn't find range descriptor")
	}
	// Store RangeDescriptor as metadata, it will be retrieved by ApplySnapshot().
	snapData.RangeDescriptor = desc
	// Iterate over all the data in the range, including local-only data like
	// the response cache. The iterator holds engine resources and must be
	// released when done; previously it was created inline in the for
	// statement and never closed, leaking those resources on every snapshot.
	iter := newRangeDataIterator(r.Desc(), snap)
	defer iter.Close()
	for ; iter.Valid(); iter.Next() {
		snapData.KV = append(snapData.KV,
			&proto.RaftSnapshotData_KeyValue{Key: iter.Key(), Value: iter.Value()})
	}
	data, err := gogoproto.Marshal(&snapData)
	if err != nil {
		return raftpb.Snapshot{}, err
	}
	// Synthesize our raftpb.ConfState from desc: one raft node ID per replica.
	var cs raftpb.ConfState
	for _, rep := range desc.Replicas {
		cs.Nodes = append(cs.Nodes, uint64(proto.MakeRaftNodeID(rep.NodeID, rep.StoreID)))
	}
	term, err := r.Term(appliedIndex)
	if err != nil {
		return raftpb.Snapshot{}, util.Errorf("failed to fetch term of %d: %s", appliedIndex, err)
	}
	return raftpb.Snapshot{
		Data: data,
		Metadata: raftpb.SnapshotMetadata{
			Index:     appliedIndex,
			Term:      term,
			ConfState: cs,
		},
	}, nil
}
示例3: TestReplicateRange
// TestReplicateRange verifies basic replication functionality by creating two stores
// and a range, replicating the range to the second store, and reading its data there.
func TestReplicateRange(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := multiTestContext{}
	mtc.Start(t, 2)
	defer mtc.Stop()
	// Issue a command on the first node before replicating.
	incArgs, incResp := incrementArgs([]byte("a"), 5, 1, mtc.stores[0].StoreID())
	if err := mtc.stores[0].ExecuteCmd(context.Background(), proto.Call{Args: incArgs, Reply: incResp}); err != nil {
		t.Fatal(err)
	}
	rng, err := mtc.stores[0].GetRange(1)
	if err != nil {
		t.Fatal(err)
	}
	// Add the second store's replica to the range.
	if err := rng.ChangeReplicas(proto.ADD_REPLICA,
		proto.Replica{
			NodeID:  mtc.stores[1].Ident.NodeID,
			StoreID: mtc.stores[1].Ident.StoreID,
		}); err != nil {
		t.Fatal(err)
	}
	// Verify no intent remains on range descriptor key: a consistent read
	// (consistent=true) would fail if ChangeReplicas left its intent behind.
	key := keys.RangeDescriptorKey(rng.Desc().StartKey)
	desc := proto.RangeDescriptor{}
	if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &desc); !ok || err != nil {
		t.Fatalf("fetching range descriptor yielded %t, %s", ok, err)
	}
	// Verify that in time, no intents remain on meta addressing
	// keys, and that range descriptor on the meta records is correct.
	util.SucceedsWithin(t, 1*time.Second, func() error {
		meta2 := keys.RangeMetaKey(proto.KeyMax)
		meta1 := keys.RangeMetaKey(meta2)
		for _, key := range []proto.Key{meta2, meta1} {
			metaDesc := proto.RangeDescriptor{}
			if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &metaDesc); !ok || err != nil {
				return util.Errorf("failed to resolve %s", key)
			}
			if !reflect.DeepEqual(metaDesc, desc) {
				return util.Errorf("descs not equal: %+v != %+v", metaDesc, desc)
			}
		}
		return nil
	})
	// Verify that the same data is available on the replica. The read is
	// INCONSISTENT — NOTE(review): presumably so the second store can serve it
	// directly without holding the lease; confirm against ExecuteCmd semantics.
	util.SucceedsWithin(t, 1*time.Second, func() error {
		getArgs, getResp := getArgs([]byte("a"), 1, mtc.stores[1].StoreID())
		getArgs.ReadConsistency = proto.INCONSISTENT
		if err := mtc.stores[1].ExecuteCmd(context.Background(), proto.Call{Args: getArgs, Reply: getResp}); err != nil {
			return util.Errorf("failed to read data")
		}
		if v := mustGetInteger(getResp.Value); v != 5 {
			return util.Errorf("failed to read correct data: %d", v)
		}
		return nil
	})
}
示例4: TestRemoveRangeWithoutGC
// TestRemoveRangeWithoutGC ensures that we do not panic when a
// replica has been removed but not yet GC'd (and therefore
// does not have an active raft group).
func TestRemoveRangeWithoutGC(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()
	// Disable the GC queue and move the range from store 0 to 1.
	mtc.stores[0].DisableReplicaGCQueue(true)
	const rangeID roachpb.RangeID = 1
	mtc.replicateRange(rangeID, 1)
	mtc.unreplicateRange(rangeID, 0)
	// Wait for store 0 to process the removal: once applied, its copy of the
	// descriptor lists only the single remaining replica.
	util.SucceedsWithin(t, time.Second, func() error {
		rep, err := mtc.stores[0].GetReplica(rangeID)
		if err != nil {
			return err
		}
		desc := rep.Desc()
		if len(desc.Replicas) != 1 {
			return util.Errorf("range has %d replicas", len(desc.Replicas))
		}
		return nil
	})
	// The replica's data is still on disk even though the Replica
	// object is removed (GC queue is disabled, so nothing cleaned it up).
	var desc roachpb.RangeDescriptor
	descKey := keys.RangeDescriptorKey(roachpb.RKeyMin)
	if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), descKey,
		mtc.stores[0].Clock().Now(), true, nil, &desc); err != nil {
		t.Fatal(err)
	} else if !ok {
		t.Fatal("expected range descriptor to be present")
	}
	// Stop and restart the store to reset the replica's raftGroup
	// pointer to nil. As long as the store has not been restarted it
	// can continue to use its last known replica ID.
	mtc.stopStore(0)
	mtc.restartStore(0)
	// Turn off the GC queue to ensure that the replica is deleted at
	// startup instead of by the scanner. This is not 100% guaranteed
	// since the scanner could have already run at this point, but it
	// should be enough to prevent us from accidentally relying on the
	// scanner.
	mtc.stores[0].DisableReplicaGCQueue(true)
	// The Replica object is not recreated.
	if _, err := mtc.stores[0].GetReplica(rangeID); err == nil {
		t.Fatalf("expected replica to be missing")
	}
	// And the data is no longer on disk: startup deleted the removed
	// replica's state.
	if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), descKey,
		mtc.stores[0].Clock().Now(), true, nil, &desc); err != nil {
		t.Fatal(err)
	} else if ok {
		t.Fatal("expected range descriptor to be absent")
	}
}
示例5: changeReplicas
// changeReplicas applies one replica change (of the given action type) per
// target against the range starting at startKey, re-reading the range
// descriptor before each step, and returns the final descriptor.
func (tc *TestCluster) changeReplicas(
	action roachpb.ReplicaChangeType,
	startKey roachpb.RKey,
	targets ...ReplicationTarget,
) (*roachpb.RangeDescriptor, error) {
	rangeDesc := &roachpb.RangeDescriptor{}
	descKey := keys.RangeDescriptorKey(startKey)
	// TODO(andrei): the following code has been adapted from
	// multiTestContext.replicateRange(). Find a way to share.
	for _, target := range targets {
		// Perform a consistent read to get the updated range descriptor (as opposed
		// to just going to one of the stores), to make sure we have the effects of
		// the previous ChangeReplicas call. By the time ChangeReplicas returns the
		// raft leader is guaranteed to have the updated version, but followers are
		// not.
		if err := tc.Servers[0].DB().GetProto(descKey, rangeDesc); err != nil {
			return nil, err
		}
		// Ask an arbitrary replica of the range to perform the change. Note that
		// the target for addition/removal is specified, this is about the choice
		// of which replica receives the ChangeReplicas operation.
		store, err := tc.findMemberStore(rangeDesc.Replicas[0].StoreID)
		if err != nil {
			return nil, err
		}
		replica, err := store.GetReplica(rangeDesc.RangeID)
		if err != nil {
			return nil, err
		}
		if err := replica.ChangeReplicas(
			context.Background(),
			action,
			roachpb.ReplicaDescriptor{
				NodeID:  target.NodeID,
				StoreID: target.StoreID,
			},
			rangeDesc,
		); err != nil {
			return nil, err
		}
	}
	// One final consistent read to pick up the effects of the last change.
	if err := tc.Servers[0].DB().GetProto(descKey, rangeDesc); err != nil {
		return nil, err
	}
	return rangeDesc, nil
}
示例6: TestReplicateRange
// TestReplicateRange verifies basic replication functionality by creating two stores
// and a range, replicating the range to the second store, and reading its data there.
func TestReplicateRange(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()
	// Issue a command on the first node before replicating.
	incArgs := incrementArgs([]byte("a"), 5)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	rng, err := mtc.stores[0].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}
	// Add the second store's replica to the range.
	if err := rng.ChangeReplicas(roachpb.ADD_REPLICA,
		roachpb.ReplicaDescriptor{
			NodeID:  mtc.stores[1].Ident.NodeID,
			StoreID: mtc.stores[1].Ident.StoreID,
		}, rng.Desc()); err != nil {
		t.Fatal(err)
	}
	// Verify no intent remains on range descriptor key: a consistent read
	// (consistent=true) would fail if ChangeReplicas left its intent behind.
	key := keys.RangeDescriptorKey(rng.Desc().StartKey)
	desc := roachpb.RangeDescriptor{}
	if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &desc); !ok || err != nil {
		t.Fatalf("fetching range descriptor yielded %t, %s", ok, err)
	}
	// Verify that in time, no intents remain on meta addressing
	// keys, and that range descriptor on the meta records is correct.
	util.SucceedsWithin(t, 1*time.Second, func() error {
		meta2 := keys.Addr(keys.RangeMetaKey(roachpb.RKeyMax))
		meta1 := keys.Addr(keys.RangeMetaKey(meta2))
		for _, key := range []roachpb.RKey{meta2, meta1} {
			metaDesc := roachpb.RangeDescriptor{}
			if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key.AsRawKey(), mtc.stores[0].Clock().Now(), true, nil, &metaDesc); !ok || err != nil {
				return util.Errorf("failed to resolve %s", key.AsRawKey())
			}
			if !reflect.DeepEqual(metaDesc, desc) {
				return util.Errorf("descs not equal: %+v != %+v", metaDesc, desc)
			}
		}
		return nil
	})
	// Verify that the same data is available on the replica. The read is
	// INCONSISTENT — NOTE(review): presumably so the second store can serve it
	// directly without holding the lease; confirm against Send semantics.
	util.SucceedsWithin(t, replicaReadTimeout, func() error {
		getArgs := getArgs([]byte("a"))
		if reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, &getArgs); err != nil {
			return util.Errorf("failed to read data: %s", err)
		} else if e, v := int64(5), mustGetInt(reply.(*roachpb.GetResponse).Value); v != e {
			return util.Errorf("failed to read correct data: expected %d, got %d", e, v)
		}
		return nil
	})
}
示例7: SplitRange
// SplitRange splits the range containing splitKey.
// The right range created by the split starts at the split key and extends to the
// original range's end key.
// Returns the new descriptors of the left and right ranges.
//
// splitKey must correspond to a SQL table key (it must end with a family ID /
// col ID).
func (tc *TestCluster) SplitRange(
	splitKey roachpb.Key,
) (*roachpb.RangeDescriptor, *roachpb.RangeDescriptor, error) {
	splitRKey, err := keys.Addr(splitKey)
	if err != nil {
		return nil, nil, err
	}
	origDesc, err := tc.LookupRange(splitKey)
	if err != nil {
		return nil, nil, err
	}
	// Splitting at the range's own start key would be a no-op; reject it.
	if origDesc.StartKey.Equal(splitRKey) {
		return nil, nil, errors.Errorf(
			"cannot split range %+v at start key %q", origDesc, splitKey)
	}
	splitReq := roachpb.AdminSplitRequest{
		Span:     roachpb.Span{Key: splitKey},
		SplitKey: splitKey,
	}
	if _, pErr := client.SendWrapped(tc.Servers[0].GetDistSender(), nil, &splitReq); pErr != nil {
		return nil, nil, errors.Errorf(
			"%q: split unexpected error: %s", splitReq.SplitKey, pErr)
	}
	left := &roachpb.RangeDescriptor{}
	right := &roachpb.RangeDescriptor{}
	if err := tc.Servers[0].DB().GetProto(
		keys.RangeDescriptorKey(origDesc.StartKey), left); err != nil {
		return nil, nil, errors.Wrap(err, "could not look up left-hand side descriptor")
	}
	// The split point might not be exactly the one we requested (it can be
	// adjusted slightly so we don't split in the middle of SQL rows). The
	// left-hand descriptor's end key is the real split point; use it to find
	// the right-hand range.
	if err := tc.Servers[0].DB().GetProto(
		keys.RangeDescriptorKey(left.EndKey), right); err != nil {
		return nil, nil, errors.Wrap(err, "could not look up right-hand side descriptor")
	}
	return left, right, nil
}
示例8: TestBatchPrevNext
// TestBatchPrevNext tests batch.{Prev,Next}.
func TestBatchPrevNext(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// rdKey returns the (range-local) descriptor key for s, used to exercise
	// next/prev behavior on local keys.
	rdKey := func(s string) string {
		return string(keys.RangeDescriptorKey(roachpb.RKey(s)))
	}
	// mkSpans pairs up its arguments into spans; an empty EndKey string
	// leaves the span as a point request.
	mkSpans := func(strs ...string) []roachpb.Span {
		var spans []roachpb.Span
		for i, str := range strs {
			if i%2 == 0 {
				spans = append(spans, roachpb.Span{Key: roachpb.Key(str)})
				continue
			}
			spans[len(spans)-1].EndKey = roachpb.Key(str)
		}
		return spans
	}
	keyMax, keyMin := string(roachpb.RKeyMax), string(roachpb.RKeyMin)
	abc := mkSpans("a", "", "b", "", "c", "")
	testCases := []struct {
		spans             []roachpb.Span
		key, expFW, expBW string
	}{
		{spans: mkSpans("a", "c", "b", ""), key: "b", expFW: "b", expBW: "b"},
		{spans: mkSpans("a", "c", "b", ""), key: "a", expFW: "a", expBW: "a"},
		{spans: mkSpans("a", "c", "d", ""), key: "c", expFW: "d", expBW: "c"},
		{spans: mkSpans("a", "c\x00", "d", ""), key: "c", expFW: "c", expBW: "c"},
		{spans: abc, key: "b", expFW: "b", expBW: "b"},
		{spans: abc, key: "b\x00", expFW: "c", expBW: "b\x00"},
		{spans: abc, key: "bb", expFW: "c", expBW: "b"},
		{spans: mkSpans(), key: "whatevs", expFW: keyMax, expBW: keyMin},
		{spans: mkSpans(rdKey("a"), rdKey("c")), key: "c", expFW: "c", expBW: "c"},
		{spans: mkSpans(rdKey("a"), rdKey("c")), key: "c\x00", expFW: keyMax, expBW: "c\x00"},
	}
	for i, test := range testCases {
		// Build a batch of Scan requests covering the test's spans.
		var ba roachpb.BatchRequest
		for _, s := range test.spans {
			scan := &roachpb.ScanRequest{}
			scan.Key, scan.EndKey = s.Key, s.EndKey
			ba.Add(scan)
		}
		fw, err := next(ba, roachpb.RKey(test.key))
		if err != nil {
			t.Errorf("%d: %v", i, err)
		} else if !bytes.Equal(fw, roachpb.Key(test.expFW)) {
			t.Errorf("%d: next: expected %q, got %q", i, test.expFW, fw)
		}
		bw, err := prev(ba, roachpb.RKey(test.key))
		if err != nil {
			t.Errorf("%d: %v", i, err)
		} else if !bytes.Equal(bw, roachpb.Key(test.expBW)) {
			t.Errorf("%d: prev: expected %q, got %q", i, test.expBW, bw)
		}
	}
}
示例9: createRangeData
// createRangeData creates sample range data in all possible areas of
// the key space. Returns a slice of the encoded keys of all created
// data.
//
// Entries with ts0 (the zero timestamp) cover the range-ID-local and
// unversioned metadata keys; only the range descriptor and the two
// in-range data keys are written at a real MVCC timestamp.
func createRangeData(t *testing.T, r *Replica) []engine.MVCCKey {
	ts0 := hlc.ZeroTimestamp
	ts := hlc.Timestamp{WallTime: 1}
	desc := r.Desc()
	keyTSs := []struct {
		key roachpb.Key
		ts  hlc.Timestamp
	}{
		{keys.AbortCacheKey(r.RangeID, testTxnID), ts0},
		{keys.AbortCacheKey(r.RangeID, testTxnID2), ts0},
		{keys.RangeFrozenStatusKey(r.RangeID), ts0},
		{keys.RangeLastGCKey(r.RangeID), ts0},
		{keys.RaftAppliedIndexKey(r.RangeID), ts0},
		{keys.RaftTruncatedStateKey(r.RangeID), ts0},
		{keys.LeaseAppliedIndexKey(r.RangeID), ts0},
		{keys.RangeStatsKey(r.RangeID), ts0},
		{keys.RaftHardStateKey(r.RangeID), ts0},
		{keys.RaftLastIndexKey(r.RangeID), ts0},
		{keys.RaftLogKey(r.RangeID, 1), ts0},
		{keys.RaftLogKey(r.RangeID, 2), ts0},
		{keys.RangeLastReplicaGCTimestampKey(r.RangeID), ts0},
		{keys.RangeLastVerificationTimestampKey(r.RangeID), ts0},
		{keys.RangeDescriptorKey(desc.StartKey), ts},
		{keys.TransactionKey(roachpb.Key(desc.StartKey), uuid.NewV4()), ts0},
		{keys.TransactionKey(roachpb.Key(desc.StartKey.Next()), uuid.NewV4()), ts0},
		{keys.TransactionKey(fakePrevKey(desc.EndKey), uuid.NewV4()), ts0},
		// TODO(bdarnell): KeyMin.Next() results in a key in the reserved system-local space.
		// Once we have resolved https://github.com/cockroachdb/cockroach/issues/437,
		// replace this with something that reliably generates the first valid key in the range.
		//{r.Desc().StartKey.Next(), ts},
		// The following line is similar to StartKey.Next() but adds more to the key to
		// avoid falling into the system-local space.
		{append(append([]byte{}, desc.StartKey...), '\x02'), ts},
		{fakePrevKey(r.Desc().EndKey), ts},
	}
	// NB: this local slice shadows the imported `keys` package from here on;
	// all package references above occur before the shadowing.
	keys := []engine.MVCCKey{}
	for _, keyTS := range keyTSs {
		if err := engine.MVCCPut(context.Background(), r.store.Engine(), nil, keyTS.key, keyTS.ts, roachpb.MakeValueFromString("value"), nil); err != nil {
			t.Fatal(err)
		}
		keys = append(keys, engine.MVCCKey{Key: keyTS.key, Timestamp: keyTS.ts})
	}
	return keys
}
示例10: createRangeData
// createRangeData creates sample range data in all possible areas of
// the key space. Returns a slice of the encoded keys of all created
// data.
//
// Entries with ts0 (the zero timestamp) cover range-ID-local and
// unversioned metadata keys; keys written at a real timestamp additionally
// get a versioned encoding appended to the result.
func createRangeData(r *Replica, t *testing.T) []roachpb.EncodedKey {
	ts0 := roachpb.ZeroTimestamp
	ts := roachpb.Timestamp{WallTime: 1}
	keyTSs := []struct {
		key roachpb.Key
		ts  roachpb.Timestamp
	}{
		{keys.ResponseCacheKey(r.Desc().RangeID, &roachpb.ClientCmdID{WallTime: 1, Random: 1}), ts0},
		{keys.ResponseCacheKey(r.Desc().RangeID, &roachpb.ClientCmdID{WallTime: 2, Random: 2}), ts0},
		{keys.RaftHardStateKey(r.Desc().RangeID), ts0},
		{keys.RaftLogKey(r.Desc().RangeID, 1), ts0},
		{keys.RaftLogKey(r.Desc().RangeID, 2), ts0},
		{keys.RangeGCMetadataKey(r.Desc().RangeID), ts0},
		{keys.RangeLastVerificationTimestampKey(r.Desc().RangeID), ts0},
		{keys.RangeStatsKey(r.Desc().RangeID), ts0},
		{keys.RangeDescriptorKey(r.Desc().StartKey), ts},
		{keys.TransactionKey(roachpb.Key(r.Desc().StartKey), []byte("1234")), ts0},
		{keys.TransactionKey(roachpb.Key(r.Desc().StartKey.Next()), []byte("5678")), ts0},
		{keys.TransactionKey(fakePrevKey(r.Desc().EndKey), []byte("2468")), ts0},
		// TODO(bdarnell): KeyMin.Next() results in a key in the reserved system-local space.
		// Once we have resolved https://github.com/cockroachdb/cockroach/issues/437,
		// replace this with something that reliably generates the first valid key in the range.
		//{r.Desc().StartKey.Next(), ts},
		// The following line is similar to StartKey.Next() but adds more to the key to
		// avoid falling into the system-local space.
		{append(append([]byte{}, r.Desc().StartKey...), '\x01'), ts},
		{fakePrevKey(r.Desc().EndKey), ts},
	}
	// NB: this local slice shadows the imported `keys` package from here on;
	// all package references above occur before the shadowing.
	keys := []roachpb.EncodedKey{}
	for _, keyTS := range keyTSs {
		if err := engine.MVCCPut(r.store.Engine(), nil, keyTS.key, keyTS.ts, roachpb.MakeValueFromString("value"), nil); err != nil {
			t.Fatal(err)
		}
		keys = append(keys, engine.MVCCEncodeKey(keyTS.key))
		// Non-zero timestamps also produce a versioned key encoding on disk;
		// include it so callers can account for every created key.
		if !keyTS.ts.Equal(ts0) {
			keys = append(keys, engine.MVCCEncodeVersionKey(keyTS.key, keyTS.ts))
		}
	}
	return keys
}
示例11: TestTruncate
func TestTruncate(t *testing.T) {
defer leaktest.AfterTest(t)
loc := func(s string) string {
return string(keys.RangeDescriptorKey(roachpb.RKey(s)))
}
testCases := []struct {
keys [][2]string
expKeys [][2]string
from, to string
desc [2]string // optional, defaults to {from,to}
err string
}{
{
// Keys inside of active range.
keys: [][2]string{{"a", "q"}, {"c"}, {"b, e"}, {"q"}},
expKeys: [][2]string{{"a", "q"}, {"c"}, {"b, e"}, {"q"}},
from: "a", to: "q\x00",
},
{
// Keys outside of active range.
keys: [][2]string{{"a"}, {"a", "b"}, {"q"}, {"q", "z"}},
expKeys: [][2]string{{}, {}, {}, {}},
from: "b", to: "q",
},
{
// Range-local keys inside of active range.
keys: [][2]string{{loc("b")}, {loc("c")}},
expKeys: [][2]string{{loc("b")}, {loc("c")}},
from: "b", to: "e",
},
{
// Range-local key outside of active range.
keys: [][2]string{{loc("a")}},
expKeys: [][2]string{{}},
from: "b", to: "e",
},
{
// Range-local range contained in active range.
keys: [][2]string{{loc("b"), loc("e") + "\x00"}},
expKeys: [][2]string{{loc("b"), loc("e") + "\x00"}},
from: "b", to: "e\x00",
},
{
// Range-local range not contained in active range.
keys: [][2]string{{loc("a"), loc("b")}},
from: "b", to: "e",
err: "local key range must not span ranges",
},
{
// Mixed range-local vs global key range.
keys: [][2]string{{loc("c"), "d\x00"}},
from: "b", to: "e",
err: "local key mixed with global key",
},
{
// Key range touching and intersecting active range.
keys: [][2]string{{"a", "b"}, {"a", "c"}, {"p", "q"}, {"p", "r"}, {"a", "z"}},
expKeys: [][2]string{{}, {"b", "c"}, {"p", "q"}, {"p", "q"}, {"b", "q"}},
from: "b", to: "q",
},
// Active key range is intersection of descriptor and [from,to).
{
keys: [][2]string{{"c", "q"}},
expKeys: [][2]string{{"d", "p"}},
from: "a", to: "z",
desc: [2]string{"d", "p"},
},
{
keys: [][2]string{{"c", "q"}},
expKeys: [][2]string{{"d", "p"}},
from: "d", to: "p",
desc: [2]string{"a", "z"},
},
}
for i, test := range testCases {
ba := &roachpb.BatchRequest{}
for _, ks := range test.keys {
if len(ks[1]) > 0 {
ba.Add(&roachpb.ScanRequest{
Span: roachpb.Span{Key: roachpb.Key(ks[0]), EndKey: roachpb.Key(ks[1])},
})
} else {
ba.Add(&roachpb.GetRequest{
Span: roachpb.Span{Key: roachpb.Key(ks[0])},
})
}
}
original := proto.Clone(ba).(*roachpb.BatchRequest)
desc := &roachpb.RangeDescriptor{
StartKey: roachpb.RKey(test.desc[0]), EndKey: roachpb.RKey(test.desc[1]),
}
if len(desc.StartKey) == 0 {
desc.StartKey = roachpb.RKey(test.from)
}
if len(desc.EndKey) == 0 {
desc.EndKey = roachpb.RKey(test.to)
}
//.........這裏部分代碼省略.........
示例12: TestStoreRangeSplit
// TestStoreRangeSplit executes a split of a range and verifies that the
// resulting ranges respond to the right key ranges and that their stats
// and response caches have been properly accounted for.
func TestStoreRangeSplit(t *testing.T) {
defer leaktest.AfterTest(t)
store, stopper := createTestStore(t)
defer stopper.Stop()
rangeID := roachpb.RangeID(1)
splitKey := roachpb.RKey("m")
content := roachpb.Key("asdvb")
// First, write some values left and right of the proposed split key.
pArgs := putArgs([]byte("c"), content)
if _, err := client.SendWrapped(rg1(store), nil, &pArgs); err != nil {
t.Fatal(err)
}
pArgs = putArgs([]byte("x"), content)
if _, err := client.SendWrapped(rg1(store), nil, &pArgs); err != nil {
t.Fatal(err)
}
// Increments are a good way of testing the response cache. Up here, we
// address them to the original range, then later to the one that contains
// the key.
lCmdID := roachpb.ClientCmdID{WallTime: 123, Random: 423}
lIncArgs := incrementArgs([]byte("apoptosis"), 100)
if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
CmdID: lCmdID,
}, &lIncArgs); err != nil {
t.Fatal(err)
}
rIncArgs := incrementArgs([]byte("wobble"), 10)
rCmdID := roachpb.ClientCmdID{WallTime: 12, Random: 42}
if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
CmdID: rCmdID,
}, &rIncArgs); err != nil {
t.Fatal(err)
}
// Get the original stats for key and value bytes.
var ms engine.MVCCStats
if err := engine.MVCCGetRangeStats(store.Engine(), rangeID, &ms); err != nil {
t.Fatal(err)
}
keyBytes, valBytes := ms.KeyBytes, ms.ValBytes
// Split the range.
args := adminSplitArgs(roachpb.RKeyMin, splitKey)
if _, err := client.SendWrapped(rg1(store), nil, &args); err != nil {
t.Fatal(err)
}
// Verify no intents remains on range descriptor keys.
for _, key := range []roachpb.Key{keys.RangeDescriptorKey(roachpb.RKeyMin), keys.RangeDescriptorKey(splitKey)} {
if _, _, err := engine.MVCCGet(store.Engine(), key, store.Clock().Now(), true, nil); err != nil {
t.Fatal(err)
}
}
rng := store.LookupReplica(roachpb.RKeyMin, nil)
newRng := store.LookupReplica([]byte("m"), nil)
if !bytes.Equal(newRng.Desc().StartKey, splitKey) || !bytes.Equal(splitKey, rng.Desc().EndKey) {
t.Errorf("ranges mismatched, wanted %q=%q=%q", newRng.Desc().StartKey, splitKey, rng.Desc().EndKey)
}
if !bytes.Equal(newRng.Desc().EndKey, roachpb.RKeyMax) || !bytes.Equal(rng.Desc().StartKey, roachpb.RKeyMin) {
t.Errorf("new ranges do not cover KeyMin-KeyMax, but only %q-%q", rng.Desc().StartKey, newRng.Desc().EndKey)
}
// Try to get values from both left and right of where the split happened.
gArgs := getArgs([]byte("c"))
if reply, err := client.SendWrapped(rg1(store), nil, &gArgs); err != nil {
t.Fatal(err)
} else if gReply := reply.(*roachpb.GetResponse); !bytes.Equal(gReply.Value.GetRawBytes(), content) {
t.Fatalf("actual value %q did not match expected value %q", gReply.Value.GetRawBytes(), content)
}
gArgs = getArgs([]byte("x"))
if reply, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
RangeID: newRng.Desc().RangeID,
}, &gArgs); err != nil {
t.Fatal(err)
} else if gReply := reply.(*roachpb.GetResponse); !bytes.Equal(gReply.Value.GetRawBytes(), content) {
t.Fatalf("actual value %q did not match expected value %q", gReply.Value.GetRawBytes(), content)
}
// Send out an increment request copied from above (same ClientCmdID) which
// remains in the old range.
if reply, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
CmdID: lCmdID,
}, &lIncArgs); err != nil {
t.Fatal(err)
} else if lIncReply := reply.(*roachpb.IncrementResponse); lIncReply.NewValue != 100 {
t.Errorf("response cache broken in old range, expected %d but got %d", lIncArgs.Increment, lIncReply.NewValue)
}
// Send out the same increment copied from above (same ClientCmdID), but
// now to the newly created range (which should hold that key).
if reply, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
RangeID: newRng.Desc().RangeID,
CmdID: rCmdID,
}, &rIncArgs); err != nil {
//.........這裏部分代碼省略.........
示例13: TestStoreVerifyKeys
// TestStoreVerifyKeys checks that key length is enforced and
// that end keys must sort >= start. It also verifies the exceptions:
// meta-prefixed keys and their derived descriptor/transaction keys are
// allowed to exceed the normal maximum key length.
func TestStoreVerifyKeys(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, _, stopper := createTestStore(t)
	defer stopper.Stop()
	// One byte over the maximum permitted key length.
	tooLongKey := proto.Key(strings.Repeat("x", proto.KeyMaxLength+1))
	// Start with a too-long key on a get.
	gArgs := getArgs(tooLongKey, 1, store.StoreID())
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &gArgs, Reply: gArgs.CreateReply()}); err == nil {
		t.Fatal("expected error for key too long")
	}
	// Try a start key == KeyMax.
	gArgs.Key = proto.KeyMax
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &gArgs, Reply: gArgs.CreateReply()}); err == nil {
		t.Fatal("expected error for start key == KeyMax")
	}
	// Try a get with an end key specified (get requires only a start key and should fail).
	gArgs.EndKey = proto.KeyMax
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &gArgs, Reply: gArgs.CreateReply()}); err == nil {
		t.Fatal("expected error for end key specified on a non-range-based operation")
	}
	// Try a scan with too-long EndKey.
	sArgs := scanArgs(proto.KeyMin, tooLongKey, 1, store.StoreID())
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &sArgs, Reply: sArgs.CreateReply()}); err == nil {
		t.Fatal("expected error for end key too long")
	}
	// Try a scan with end key < start key.
	sArgs.Key = []byte("b")
	sArgs.EndKey = []byte("a")
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &sArgs, Reply: sArgs.CreateReply()}); err == nil {
		t.Fatal("expected error for end key < start")
	}
	// Try a scan with start key == end key.
	sArgs.Key = []byte("a")
	sArgs.EndKey = sArgs.Key
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &sArgs, Reply: sArgs.CreateReply()}); err == nil {
		t.Fatal("expected error for start == end key")
	}
	// Try a put to meta2 key which would otherwise exceed maximum key
	// length, but is accepted because of the meta prefix.
	meta2KeyMax := keys.MakeKey(keys.Meta2Prefix, proto.KeyMax)
	pArgs := putArgs(meta2KeyMax, []byte("value"), 1, store.StoreID())
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &pArgs, Reply: pArgs.CreateReply()}); err != nil {
		t.Fatalf("unexpected error on put to meta2 value: %s", err)
	}
	// Try to put a range descriptor record for a start key which is
	// maximum length. The last byte is lowered from 0xff to 0x01 so the
	// key is still max-length but sorts below KeyMax.
	key := append([]byte{}, proto.KeyMax...)
	key[len(key)-1] = 0x01
	pArgs = putArgs(keys.RangeDescriptorKey(key), []byte("value"), 1, store.StoreID())
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &pArgs, Reply: pArgs.CreateReply()}); err != nil {
		t.Fatalf("unexpected error on put to range descriptor for KeyMax value: %s", err)
	}
	// Try a put to txn record for a meta2 key (note that this doesn't
	// actually happen in practice, as txn records are not put directly,
	// but are instead manipulated only through txn methods).
	pArgs = putArgs(keys.TransactionKey(meta2KeyMax, []byte(uuid.NewUUID4())),
		[]byte("value"), 1, store.StoreID())
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &pArgs, Reply: pArgs.CreateReply()}); err != nil {
		t.Fatalf("unexpected error on put to txn meta2 value: %s", err)
	}
}
示例14: TestLogRebalances
func TestLogRebalances(t *testing.T) {
defer leaktest.AfterTest(t)()
s := server.StartTestServer(t)
defer s.Stop()
// Use a client to get the RangeDescriptor for the first range. We will use
// this range's information to log fake rebalance events.
db := s.DB()
desc := &roachpb.RangeDescriptor{}
if pErr := db.GetProto(keys.RangeDescriptorKey(roachpb.RKeyMin), desc); pErr != nil {
t.Fatal(pErr)
}
// This code assumes that there is only one TestServer, and thus that
// StoreID 1 is present on the testserver. If this assumption changes in the
// future, *any* store will work, but a new method will need to be added to
// Stores (or a creative usage of VisitStores could suffice).
store, pErr := s.Stores().GetStore(roachpb.StoreID(1))
if pErr != nil {
t.Fatal(pErr)
}
// Log several fake events using the store.
logEvent := func(changeType roachpb.ReplicaChangeType) {
if pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
return store.LogReplicaChangeTest(txn, changeType, desc.Replicas[0], *desc)
}); pErr != nil {
t.Fatal(pErr)
}
}
reg := store.Registry()
checkMetrics := func(expAdds, expRemoves int64) {
if a, e := reg.GetCounter("range.adds").Count(), expAdds; a != e {
t.Errorf("range adds %d != expected %d", a, e)
}
if a, e := reg.GetCounter("range.removes").Count(), expRemoves; a != e {
t.Errorf("range removes %d != expected %d", a, e)
}
}
logEvent(roachpb.ADD_REPLICA)
checkMetrics(1 /*add*/, 0 /*remove*/)
logEvent(roachpb.ADD_REPLICA)
checkMetrics(2 /*adds*/, 0 /*remove*/)
logEvent(roachpb.REMOVE_REPLICA)
checkMetrics(2 /*adds*/, 1 /*remove*/)
// Open a SQL connection to verify that the events have been logged.
pgURL, cleanupFn := sqlutils.PGUrl(t, s, security.RootUser, "TestLogRebalances")
defer cleanupFn()
sqlDB, err := sql.Open("postgres", pgURL.String())
if err != nil {
t.Fatal(err)
}
defer sqlDB.Close()
// verify that two add replica events have been logged.
// TODO(mrtracy): placeholders still appear to be broken, this query should
// be using a string placeholder for the eventType value.
rows, err := sqlDB.Query(`SELECT rangeID, info FROM system.rangelog WHERE eventType = 'add'`)
if err != nil {
t.Fatal(err)
}
var count int
for rows.Next() {
count++
var rangeID int64
var infoStr sql.NullString
if err := rows.Scan(&rangeID, &infoStr); err != nil {
t.Fatal(err)
}
if a, e := roachpb.RangeID(rangeID), desc.RangeID; a != e {
t.Errorf("wrong rangeID %d recorded for add event, expected %d", a, e)
}
// Verify that info returns a json struct.
if !infoStr.Valid {
t.Errorf("info not recorded for add replica of range %d", rangeID)
}
var info struct {
AddReplica roachpb.ReplicaDescriptor
UpdatedDesc roachpb.RangeDescriptor
}
if err := json.Unmarshal([]byte(infoStr.String), &info); err != nil {
t.Errorf("error unmarshalling info string for add replica %d: %s", rangeID, err)
continue
}
if int64(info.UpdatedDesc.RangeID) != rangeID {
t.Errorf("recorded wrong updated descriptor %s for add replica of range %d", info.UpdatedDesc, rangeID)
}
if a, e := info.AddReplica, desc.Replicas[0]; a != e {
t.Errorf("recorded wrong updated replica %s for add replica of range %d, expected %s",
a, rangeID, e)
}
}
if rows.Err() != nil {
t.Fatal(rows.Err())
}
if a, e := count, 2; a != e {
t.Errorf("expected %d AddReplica events logged, found %d", e, a)
//.........這裏部分代碼省略.........
示例15: TestTruncate
func TestTruncate(t *testing.T) {
defer leaktest.AfterTest(t)()
loc := func(s string) string {
return string(keys.RangeDescriptorKey(roachpb.RKey(s)))
}
locPrefix := func(s string) string {
return string(keys.MakeRangeKeyPrefix(roachpb.RKey(s)))
}
testCases := []struct {
keys [][2]string
expKeys [][2]string
from, to string
desc [2]string // optional, defaults to {from,to}
err string
}{
{
// Keys inside of active range.
keys: [][2]string{{"a", "q"}, {"c"}, {"b, e"}, {"q"}},
expKeys: [][2]string{{"a", "q"}, {"c"}, {"b, e"}, {"q"}},
from: "a", to: "q\x00",
},
{
// Keys outside of active range.
keys: [][2]string{{"a"}, {"a", "b"}, {"q"}, {"q", "z"}},
expKeys: [][2]string{{}, {}, {}, {}},
from: "b", to: "q",
},
{
// Range-local keys inside of active range.
keys: [][2]string{{loc("b")}, {loc("c")}},
expKeys: [][2]string{{loc("b")}, {loc("c")}},
from: "b", to: "e",
},
{
// Range-local key outside of active range.
keys: [][2]string{{loc("a")}},
expKeys: [][2]string{{}},
from: "b", to: "e",
},
{
// Range-local range contained in active range.
keys: [][2]string{{loc("b"), loc("e") + "\x00"}},
expKeys: [][2]string{{loc("b"), loc("e") + "\x00"}},
from: "b", to: "e\x00",
},
{
// Range-local range not contained in active range.
keys: [][2]string{{loc("a"), loc("b")}},
expKeys: [][2]string{{}},
from: "c", to: "e",
},
{
// Range-local range not contained in active range.
keys: [][2]string{{loc("a"), locPrefix("b")}, {loc("e"), loc("f")}},
expKeys: [][2]string{{}, {}},
from: "b", to: "e",
},
{
// Range-local range partially contained in active range.
keys: [][2]string{{loc("a"), loc("b")}},
expKeys: [][2]string{{loc("a"), locPrefix("b")}},
from: "a", to: "b",
},
{
// Range-local range partially contained in active range.
keys: [][2]string{{loc("a"), loc("b")}},
expKeys: [][2]string{{locPrefix("b"), loc("b")}},
from: "b", to: "e",
},
{
// Range-local range contained in active range.
keys: [][2]string{{locPrefix("b"), loc("b")}},
expKeys: [][2]string{{locPrefix("b"), loc("b")}},
from: "b", to: "c",
},
{
// Mixed range-local vs global key range.
keys: [][2]string{{loc("c"), "d\x00"}},
from: "b", to: "e",
err: "local key mixed with global key",
},
{
// Key range touching and intersecting active range.
keys: [][2]string{{"a", "b"}, {"a", "c"}, {"p", "q"}, {"p", "r"}, {"a", "z"}},
expKeys: [][2]string{{}, {"b", "c"}, {"p", "q"}, {"p", "q"}, {"b", "q"}},
from: "b", to: "q",
},
// Active key range is intersection of descriptor and [from,to).
{
keys: [][2]string{{"c", "q"}},
expKeys: [][2]string{{"d", "p"}},
from: "a", to: "z",
desc: [2]string{"d", "p"},
},
{
keys: [][2]string{{"c", "q"}},
expKeys: [][2]string{{"d", "p"}},
from: "d", to: "p",
desc: [2]string{"a", "z"},
},
//.........這裏部分代碼省略.........