This article collects typical usage examples of the RangeMetaKey function from the Golang package github.com/cockroachdb/cockroach/keys. If you are wondering what RangeMetaKey does, how to use it, or what real-world calls look like, the curated examples below should help.
The following 15 code examples of RangeMetaKey are shown, ordered by popularity.
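Before diving into the examples, it helps to see the addressing scheme RangeMetaKey implements. The sketch below is a self-contained toy, not the library's implementation: the prefix byte values and the local rangeMetaKey function are assumptions made for illustration. What it demonstrates matches what Example 2 asserts: a user key maps to a meta2 key, a meta2 key maps to a meta1 key, and a meta1 key maps to KeyMin.

package main

import (
	"bytes"
	"fmt"
)

// Toy stand-ins for the real key prefixes; the actual byte values in
// cockroach's keys package differ (these are assumptions for the demo).
var (
	keyMin      = []byte{}
	meta1Prefix = []byte("\x00meta1")
	meta2Prefix = []byte("\x00meta2")
)

// rangeMetaKey mimics keys.RangeMetaKey: it returns the addressing key
// one meta level above its argument, bottoming out at KeyMin.
func rangeMetaKey(key []byte) []byte {
	if len(key) == 0 || bytes.HasPrefix(key, meta1Prefix) {
		// meta1 keys (and KeyMin itself) have no level above them.
		return keyMin
	}
	if bytes.HasPrefix(key, meta2Prefix) {
		// A meta2 key is addressed by the meta1 key with the same suffix.
		return append(append([]byte{}, meta1Prefix...), key[len(meta2Prefix):]...)
	}
	// An ordinary key is addressed by a meta2 key.
	return append(append([]byte{}, meta2Prefix...), key...)
}

func main() {
	k := []byte("test")
	meta2 := rangeMetaKey(k)     // "\x00meta2test"
	meta1 := rangeMetaKey(meta2) // "\x00meta1test"
	min := rangeMetaKey(meta1)   // KeyMin
	fmt.Printf("%q -> %q -> %q -> %q\n", k, meta2, meta1, min)
	fmt.Println("KeyMin after two meta levels:", bytes.Equal(min, keyMin)) // true
}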
Example 1: TestReplicateRange
// TestReplicateRange verifies basic replication functionality by creating two stores
// and a range, replicating the range to the second store, and reading its data there.
func TestReplicateRange(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := multiTestContext{}
	mtc.Start(t, 2)
	defer mtc.Stop()

	// Issue a command on the first node before replicating.
	incArgs, incResp := incrementArgs([]byte("a"), 5, 1, mtc.stores[0].StoreID())
	if err := mtc.stores[0].ExecuteCmd(context.Background(), proto.Call{Args: incArgs, Reply: incResp}); err != nil {
		t.Fatal(err)
	}

	rng, err := mtc.stores[0].GetRange(1)
	if err != nil {
		t.Fatal(err)
	}

	if err := rng.ChangeReplicas(proto.ADD_REPLICA,
		proto.Replica{
			NodeID:  mtc.stores[1].Ident.NodeID,
			StoreID: mtc.stores[1].Ident.StoreID,
		}); err != nil {
		t.Fatal(err)
	}

	// Verify no intent remains on range descriptor key.
	key := keys.RangeDescriptorKey(rng.Desc().StartKey)
	desc := proto.RangeDescriptor{}
	if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &desc); !ok || err != nil {
		t.Fatalf("fetching range descriptor yielded %t, %s", ok, err)
	}

	// Verify that in time, no intents remain on meta addressing
	// keys, and that the range descriptor on the meta records is correct.
	util.SucceedsWithin(t, 1*time.Second, func() error {
		meta2 := keys.RangeMetaKey(proto.KeyMax)
		meta1 := keys.RangeMetaKey(meta2)
		for _, key := range []proto.Key{meta2, meta1} {
			metaDesc := proto.RangeDescriptor{}
			if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &metaDesc); !ok || err != nil {
				return util.Errorf("failed to resolve %s", key)
			}
			if !reflect.DeepEqual(metaDesc, desc) {
				return util.Errorf("descs not equal: %+v != %+v", metaDesc, desc)
			}
		}
		return nil
	})

	// Verify that the same data is available on the replica.
	util.SucceedsWithin(t, 1*time.Second, func() error {
		getArgs, getResp := getArgs([]byte("a"), 1, mtc.stores[1].StoreID())
		getArgs.ReadConsistency = proto.INCONSISTENT
		if err := mtc.stores[1].ExecuteCmd(context.Background(), proto.Call{Args: getArgs, Reply: getResp}); err != nil {
			return util.Errorf("failed to read data")
		}
		if v := mustGetInteger(getResp.Value); v != 5 {
			return util.Errorf("failed to read correct data: %d", v)
		}
		return nil
	})
}
Example 2: TestRangeCacheAssumptions
func TestRangeCacheAssumptions(t *testing.T) {
	defer leaktest.AfterTest(t)
	expKeyMin := keys.RangeMetaKey(keys.RangeMetaKey(keys.RangeMetaKey(proto.Key("test"))))
	if !bytes.Equal(expKeyMin, proto.KeyMin) {
		t.Fatalf("RangeCache relies on RangeMetaKey returning KeyMin after two levels, but got %s", expKeyMin)
	}
}
Example 3: TestRangeCacheClearOverlappingMeta
// TestRangeCacheClearOverlappingMeta prevents regression of a bug which caused
// a panic when clearing overlapping descriptors for [KeyMin, Meta2Key). The
// issue was that when attempting to clear out descriptors which were subsumed
// by the above range, an iteration over the corresponding meta keys was
// performed, with the left endpoint excluded. This exclusion was incorrect: it
// first incremented the start key (KeyMin) and then formed the meta key; for
// KeyMin this leads to Meta2Prefix\x00. For the above EndKey, the meta key is
// a Meta1key which sorts before Meta2Prefix\x00, causing a panic. The fix was
// simply to increment the meta key for StartKey, not StartKey itself.
func TestRangeCacheClearOverlappingMeta(t *testing.T) {
	defer leaktest.AfterTest(t)

	firstDesc := &proto.RangeDescriptor{
		StartKey: proto.KeyMin,
		EndKey:   proto.Key("zzz"),
	}
	restDesc := &proto.RangeDescriptor{
		StartKey: firstDesc.StartKey,
		EndKey:   proto.KeyMax,
	}
	cache := newRangeDescriptorCache(nil, 2<<10)
	cache.rangeCache.Add(rangeCacheKey(keys.RangeMetaKey(firstDesc.EndKey)),
		firstDesc)
	cache.rangeCache.Add(rangeCacheKey(keys.RangeMetaKey(restDesc.EndKey)),
		restDesc)

	// Add new range, corresponding to splitting the first range at a meta key.
	metaSplitDesc := &proto.RangeDescriptor{
		StartKey: proto.KeyMin,
		EndKey:   proto.Key(keys.RangeMetaKey(proto.Key("foo"))),
	}
	func() {
		defer func() {
			if r := recover(); r != nil {
				t.Fatalf("invocation of clearOverlappingCachedRangeDescriptors panicked: %v", r)
			}
		}()
		cache.clearOverlappingCachedRangeDescriptors(metaSplitDesc.EndKey, keys.RangeMetaKey(metaSplitDesc.EndKey), metaSplitDesc)
	}()
}
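The byte-level reason for the panic described above can be checked directly. Using the same toy prefixes as in the earlier sketch (assumed values, not the library's constants): incrementing KeyMin before forming the meta key yields a meta2-prefixed left endpoint, while the meta key of the meta2 EndKey is a meta1 key, which sorts before it, producing an inverted iteration interval.

package main

import (
	"bytes"
	"fmt"
)

// Toy prefixes as in the earlier sketch (assumed byte values).
var (
	meta1Prefix = []byte("\x00meta1")
	meta2Prefix = []byte("\x00meta2")
)

func main() {
	// Buggy exclusion of the left endpoint: increment StartKey first
	// (KeyMin.Next() == "\x00"), then form the meta key. The result is
	// Meta2Prefix + "\x00".
	left := append(append([]byte{}, meta2Prefix...), 0x00)

	// The meta key of the split's meta2 EndKey is a meta1 key.
	right := append(append([]byte{}, meta1Prefix...), []byte("foo")...)

	// meta1 keys sort before meta2 keys, so the interval [left, right)
	// is inverted, which is what tripped the panic before the fix.
	fmt.Println(bytes.Compare(left, right) > 0) // true
}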
Example 4: clearOverlappingCachedRangeDescriptors
// clearOverlappingCachedRangeDescriptors looks up and clears any
// cache entries which overlap the specified key or descriptor.
func (rdc *rangeDescriptorCache) clearOverlappingCachedRangeDescriptors(key, metaKey proto.Key, desc *proto.RangeDescriptor) {
	if desc.StartKey.Equal(desc.EndKey) { // True for some unittests.
		return
	}
	// Clear out any descriptors which subsume the key which we're going
	// to cache. For example, an existing KeyMin->KeyMax descriptor
	// should be cleared out in favor of a KeyMin->"m" descriptor.
	k, v, ok := rdc.rangeCache.Ceil(rangeCacheKey(metaKey))
	if ok {
		descriptor := v.(*proto.RangeDescriptor)
		addrKey := keys.KeyAddress(key)
		if !addrKey.Less(descriptor.StartKey) && !descriptor.EndKey.Less(addrKey) {
			if log.V(1) {
				log.Infof("clearing overlapping descriptor: key=%s desc=%s", k, descriptor)
			}
			rdc.rangeCache.Del(k.(rangeCacheKey))
		}
	}
	// Also clear any descriptors which are subsumed by the one we're
	// going to cache. This could happen on a merge (and also happens
	// when there's a lot of concurrency). Iterate from StartKey.Next().
	rdc.rangeCache.DoRange(func(k, v interface{}) {
		if log.V(1) {
			log.Infof("clearing subsumed descriptor: key=%s desc=%s", k, v.(*proto.RangeDescriptor))
		}
		rdc.rangeCache.Del(k.(rangeCacheKey))
	}, rangeCacheKey(keys.RangeMetaKey(desc.StartKey.Next())),
		rangeCacheKey(keys.RangeMetaKey(desc.EndKey)))
}
Example 5: getCachedRangeDescriptorLocked
// getCachedRangeDescriptorLocked is a helper function to retrieve the
// descriptor of the range which contains the given key, if present in the
// cache. It is assumed that the caller holds a read lock on rdc.rangeCacheMu.
func (rdc *rangeDescriptorCache) getCachedRangeDescriptorLocked(key proto.Key, inclusive bool) (
	rangeCacheKey, *proto.RangeDescriptor) {
	// The cache is indexed using the end-key of the range, but the
	// end-key is non-inclusive by default.
	var metaKey proto.Key
	if !inclusive {
		metaKey = keys.RangeMetaKey(key.Next())
	} else {
		metaKey = keys.RangeMetaKey(key)
	}

	k, v, ok := rdc.rangeCache.Ceil(rangeCacheKey(metaKey))
	if !ok {
		return nil, nil
	}
	metaEndKey := k.(rangeCacheKey)
	rd := v.(*proto.RangeDescriptor)

	// Check that key actually belongs to the range.
	if !rd.ContainsKey(key) {
		// The key is the EndKey and we're inclusive, so just return the range descriptor.
		if inclusive && key.Equal(rd.EndKey) {
			return metaEndKey, rd
		}
		return nil, nil
	}

	// The key is the StartKey, but we're inclusive and thus need to return the
	// previous range descriptor, but it is not in the cache yet.
	if inclusive && key.Equal(rd.StartKey) {
		return nil, nil
	}
	return metaEndKey, rd
}
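The lookup pattern above (index descriptors by the meta key of their end key, then take the cache's ceiling entry for the possibly-incremented lookup key) can be reproduced with an ordinary sorted slice standing in for the ordered cache. Everything below (the rangeDesc type, the string keys) is a hypothetical stand-in, not the cache's real API:

package main

import (
	"fmt"
	"sort"
)

// A toy descriptor and an end-key-sorted "cache", standing in for the
// ordered cache keyed on RangeMetaKey(EndKey) in the example above.
type rangeDesc struct{ start, end string }

var cache = []rangeDesc{ // sorted by end key
	{start: "", end: "g"},
	{start: "g", end: "m"},
	{start: "m", end: "\xff"},
}

// lookup finds the descriptor containing key. Because ranges are indexed
// by their *exclusive* end key, a non-inclusive lookup must search with
// key's immediate successor, mirroring keys.RangeMetaKey(key.Next()).
func lookup(key string, inclusive bool) *rangeDesc {
	searchKey := key
	if !inclusive {
		searchKey = key + "\x00" // key.Next() for byte strings
	}
	// Ceil: the first cached end key >= searchKey.
	i := sort.Search(len(cache), func(i int) bool { return cache[i].end >= searchKey })
	if i == len(cache) {
		return nil
	}
	d := cache[i]
	// Check the key actually belongs to the candidate range; an inclusive
	// lookup is also allowed to match the range's exact end key.
	if key < d.start || (key >= d.end && !(inclusive && key == d.end)) {
		return nil
	}
	return &d
}

func main() {
	fmt.Println(*lookup("g", false)) // {g m}: "g" belongs to the range starting at "g"
	fmt.Println(*lookup("g", true))  // { g}: inclusive lookup returns the range *ending* at "g"
}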
Example 6: TestRangeSplitMeta
// TestRangeSplitMeta executes various splits (including at meta addressing)
// and checks that all created intents are resolved. This includes both intents
// which are resolved synchronously with EndTransaction and via RPC.
func TestRangeSplitMeta(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	splitKeys := []roachpb.Key{roachpb.Key("G"), keys.RangeMetaKey(roachpb.Key("F")),
		keys.RangeMetaKey(roachpb.Key("K")), keys.RangeMetaKey(roachpb.Key("H"))}

	// Execute the consecutive splits.
	for _, splitKey := range splitKeys {
		log.Infof("starting split at key %q...", splitKey)
		if err := s.DB.AdminSplit(splitKey); err != nil {
			t.Fatal(err)
		}
		log.Infof("split at key %q complete", splitKey)
	}

	if err := util.IsTrueWithin(func() bool {
		if _, _, err := engine.MVCCScan(s.Eng, keys.LocalMax, roachpb.KeyMax, 0, roachpb.MaxTimestamp, true, nil); err != nil {
			log.Infof("mvcc scan should be clean: %s", err)
			return false
		}
		return true
	}, 500*time.Millisecond); err != nil {
		t.Error("failed to verify no dangling intents within 500ms")
	}
}
Example 7: TestReplicateRange
// TestReplicateRange verifies basic replication functionality by creating two stores
// and a range, replicating the range to the second store, and reading its data there.
func TestReplicateRange(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()

	// Issue a command on the first node before replicating.
	incArgs := incrementArgs([]byte("a"), 5)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}

	rng, err := mtc.stores[0].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}

	if err := rng.ChangeReplicas(roachpb.ADD_REPLICA,
		roachpb.ReplicaDescriptor{
			NodeID:  mtc.stores[1].Ident.NodeID,
			StoreID: mtc.stores[1].Ident.StoreID,
		}, rng.Desc()); err != nil {
		t.Fatal(err)
	}

	// Verify no intent remains on range descriptor key.
	key := keys.RangeDescriptorKey(rng.Desc().StartKey)
	desc := roachpb.RangeDescriptor{}
	if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &desc); !ok || err != nil {
		t.Fatalf("fetching range descriptor yielded %t, %s", ok, err)
	}

	// Verify that in time, no intents remain on meta addressing
	// keys, and that the range descriptor on the meta records is correct.
	util.SucceedsWithin(t, 1*time.Second, func() error {
		meta2 := keys.Addr(keys.RangeMetaKey(roachpb.RKeyMax))
		meta1 := keys.Addr(keys.RangeMetaKey(meta2))
		for _, key := range []roachpb.RKey{meta2, meta1} {
			metaDesc := roachpb.RangeDescriptor{}
			if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key.AsRawKey(), mtc.stores[0].Clock().Now(), true, nil, &metaDesc); !ok || err != nil {
				return util.Errorf("failed to resolve %s", key.AsRawKey())
			}
			if !reflect.DeepEqual(metaDesc, desc) {
				return util.Errorf("descs not equal: %+v != %+v", metaDesc, desc)
			}
		}
		return nil
	})

	// Verify that the same data is available on the replica.
	util.SucceedsWithin(t, replicaReadTimeout, func() error {
		getArgs := getArgs([]byte("a"))
		if reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, &getArgs); err != nil {
			return util.Errorf("failed to read data: %s", err)
		} else if e, v := int64(5), mustGetInt(reply.(*roachpb.GetResponse).Value); v != e {
			return util.Errorf("failed to read correct data: expected %d, got %d", e, v)
		}
		return nil
	})
}
Example 8: EvictCachedRangeDescriptor
// EvictCachedRangeDescriptor will evict any cached range descriptors
// for the given key. It is intended that this method be called from a
// consumer of rangeDescriptorCache if the returned range descriptor is
// discovered to be stale.
// seenDesc should always be passed in and is used as the basis of a
// compare-and-evict (as pointers); if it is nil, eviction is unconditional
// but a warning will be logged.
func (rdc *rangeDescriptorCache) EvictCachedRangeDescriptor(descKey proto.Key, seenDesc *proto.RangeDescriptor) {
	if seenDesc == nil {
		log.Warningf("compare-and-evict for key %s with nil descriptor; clearing unconditionally", descKey)
	}

	rdc.rangeCacheMu.Lock()
	defer rdc.rangeCacheMu.Unlock()

	rngKey, cachedDesc := rdc.getCachedRangeDescriptorLocked(descKey)
	// Note that we're doing a "compare-and-erase": If seenDesc is not nil,
	// we want to clean the cache only if it equals the cached range
	// descriptor as a pointer. If not, then likely some other caller
	// already evicted previously, and we can save work by not doing it
	// again (which would prompt another expensive lookup).
	if seenDesc != nil && seenDesc != cachedDesc {
		return
	}

	for !bytes.Equal(descKey, proto.KeyMin) {
		if log.V(2) {
			log.Infof("evict cached descriptor: key=%s desc=%s\n%s", descKey, cachedDesc, rdc.stringLocked())
		} else if log.V(1) {
			log.Infof("evict cached descriptor: key=%s desc=%s", descKey, cachedDesc)
		}
		rdc.rangeCache.Del(rngKey)

		// Retrieve the metadata range key for the next level of metadata, and
		// evict that key as well. This loop ends after the meta1 range, which
		// returns KeyMin as its metadata key.
		descKey = keys.RangeMetaKey(descKey)
		rngKey, cachedDesc = rdc.getCachedRangeDescriptorLocked(descKey)
	}
}
Example 9: metaKey
func metaKey(key roachpb.RKey) []byte {
	rk, err := keys.Addr(keys.RangeMetaKey(key))
	if err != nil {
		panic(err)
	}
	return rk
}
Example 10: runLsRanges
func runLsRanges(cmd *cobra.Command, args []string) {
	if len(args) > 1 {
		mustUsage(cmd)
		return
	}
	var startKey proto.Key
	if len(args) >= 1 {
		startKey = keys.RangeMetaKey(proto.Key(args[0]))
	} else {
		startKey = keys.Meta2Prefix
	}

	kvDB, stopper := makeDBClient()
	defer stopper.Stop()

	rows, err := kvDB.Scan(startKey, keys.Meta2Prefix.PrefixEnd(), maxResults)
	if err != nil {
		fmt.Fprintf(os.Stderr, "scan failed: %s\n", err)
		osExit(1)
		return
	}

	for _, row := range rows {
		desc := &proto.RangeDescriptor{}
		if err := row.ValueProto(desc); err != nil {
			fmt.Fprintf(os.Stderr, "%s: unable to unmarshal range descriptor\n", row.Key)
			continue
		}
		fmt.Printf("%s-%s [%d]\n", desc.StartKey, desc.EndKey, desc.RangeID)
		for i, replica := range desc.Replicas {
			fmt.Printf("\t%d: node-id=%d store-id=%d\n",
				i, replica.NodeID, replica.StoreID)
		}
	}
	fmt.Printf("%d result(s)\n", len(rows))
}
Example 11: rangeAddressing
// rangeAddressing updates or deletes the range addressing metadata
// for the range specified by desc. The action to take is specified by
// the supplied metaAction function.
//
// The rules for meta1 and meta2 records are as follows:
//
// 1. If desc.StartKey or desc.EndKey is meta1:
//    - ERROR
// 2. If desc.EndKey is meta2:
//    - meta1(desc.EndKey)
// 3. If desc.EndKey is a normal user key:
//    - meta2(desc.EndKey)
//    3a. If desc.StartKey is KeyMin or meta2:
//        - meta1(KeyMax)
func rangeAddressing(b *client.Batch, desc *roachpb.RangeDescriptor, action metaAction) error {
	// 1. handle the illegal case of the start or end key being meta1.
	if bytes.HasPrefix(desc.EndKey, keys.Meta1Prefix) ||
		bytes.HasPrefix(desc.StartKey, keys.Meta1Prefix) {
		return errors.Errorf("meta1 addressing records cannot be split: %+v", desc)
	}

	// Note that both cases 2 and 3 are handled by keys.RangeMetaKey.
	//
	// 2. the case of the range ending with a meta2 prefix. This means
	// the range is full of meta2. We must update the relevant meta1
	// entry pointing to the end of this range.
	//
	// 3. the range ends with a normal user key, so we must update the
	// relevant meta2 entry pointing to the end of this range.
	action(b, keys.RangeMetaKey(desc.EndKey), desc)

	if !bytes.HasPrefix(desc.EndKey, keys.Meta2Prefix) {
		// 3a. the range starts with KeyMin or a meta2 addressing record,
		// update the meta1 entry for KeyMax.
		if bytes.Equal(desc.StartKey, roachpb.RKeyMin) ||
			bytes.HasPrefix(desc.StartKey, keys.Meta2Prefix) {
			action(b, keys.Meta1KeyMax, desc)
		}
	}
	return nil
}
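To make the rules concrete, the toy below reproduces rangeAddressing's branching for three descriptors, using the assumed prefixes from the earlier sketches rather than the real constants: an ordinary range updates only meta2(EndKey), a range of meta2 records updates only meta1(EndKey), and a range that starts at a meta2 key and ends at an ordinary key updates both meta2(EndKey) and meta1(KeyMax).

package main

import (
	"bytes"
	"fmt"
)

// Toy prefixes and keys (assumed values, not the library's constants).
var (
	keyMin      = []byte{}
	keyMax      = []byte("\xff\xff")
	meta1Prefix = []byte("\x00meta1")
	meta2Prefix = []byte("\x00meta2")
	meta1KeyMax = append(append([]byte{}, meta1Prefix...), keyMax...) // stand-in for keys.Meta1KeyMax
)

// rangeMetaKey as in the first sketch: one meta level up.
func rangeMetaKey(key []byte) []byte {
	if bytes.HasPrefix(key, meta2Prefix) {
		return append(append([]byte{}, meta1Prefix...), key[len(meta2Prefix):]...)
	}
	return append(append([]byte{}, meta2Prefix...), key...)
}

// metaRecordsFor mirrors rangeAddressing's branching: every range updates
// the meta record addressing its EndKey (rules 2 and 3); if EndKey is an
// ordinary key and StartKey is KeyMin or a meta2 key, meta1(KeyMax) is
// updated as well (rule 3a).
func metaRecordsFor(start, end []byte) [][]byte {
	recs := [][]byte{rangeMetaKey(end)}
	if !bytes.HasPrefix(end, meta2Prefix) &&
		(bytes.Equal(start, keyMin) || bytes.HasPrefix(start, meta2Prefix)) {
		recs = append(recs, meta1KeyMax)
	}
	return recs
}

func main() {
	meta2x := append(append([]byte{}, meta2Prefix...), 'x')
	fmt.Printf("%q\n", metaRecordsFor([]byte("a"), []byte("m"))) // rule 3 only
	fmt.Printf("%q\n", metaRecordsFor(keyMin, meta2x))           // rule 2 only
	fmt.Printf("%q\n", metaRecordsFor(meta2x, []byte("m")))      // rules 3 and 3a
}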
Example 12: getRangeDescriptors
func (db *testDescriptorDB) getRangeDescriptors(key proto.Key,
	options lookupOptions) ([]proto.RangeDescriptor, error) {
	db.lookupCount++
	metadataKey := keys.RangeMetaKey(key)

	var err error
	// Recursively call into the cache as the real DB would, terminating
	// recursion when a meta1 key is encountered.
	if len(metadataKey) > 0 && !bytes.HasPrefix(metadataKey, keys.Meta1Prefix) {
		_, err = db.cache.LookupRangeDescriptor(metadataKey, options)
	}
	return db.getDescriptor(key), err
}
Example 13: getCachedRangeDescriptorLocked
// getCachedRangeDescriptorLocked is a helper function to retrieve the
// descriptor of the range which contains the given key, if present in the
// cache. It is assumed that the caller holds a read lock on rdc.rangeCacheMu.
func (rdc *rangeDescriptorCache) getCachedRangeDescriptorLocked(key proto.Key, isReverse bool) (
	rangeCacheKey, *proto.RangeDescriptor) {
	// The cache is indexed using the end-key of the range, but the
	// end-key is non-inclusive.
	var metaKey proto.Key
	if !isReverse {
		// If it is not a reverse scan, we access the cache using key.Next().
		metaKey = keys.RangeMetaKey(key.Next())
	} else {
		// Because a reverse scan begins at the end key (exclusive), we
		// access the cache using the key directly.
		metaKey = keys.RangeMetaKey(key)
	}

	k, v, ok := rdc.rangeCache.Ceil(rangeCacheKey(metaKey))
	if !ok {
		return nil, nil
	}
	metaEndKey := k.(rangeCacheKey)
	rd := v.(*proto.RangeDescriptor)

	// Check that key actually belongs to the range.
	if !rd.ContainsKey(keys.KeyAddress(key)) {
		// The key is the EndKey of the range in a reverse scan, so just
		// return the range descriptor.
		if isReverse && key.Equal(rd.EndKey) {
			return metaEndKey, rd
		}
		return nil, nil
	}

	// The key is the StartKey of the range in a reverse scan. We need to
	// return the previous range descriptor, but it is not in the cache yet.
	if isReverse && key.Equal(rd.StartKey) {
		return nil, nil
	}
	return metaEndKey, rd
}
Example 14: LookupRange
// LookupRange returns the descriptor of the range containing key.
func (tc *TestCluster) LookupRange(key roachpb.Key) (roachpb.RangeDescriptor, error) {
	rangeLookupReq := roachpb.RangeLookupRequest{
		Span: roachpb.Span{
			Key: keys.RangeMetaKey(keys.MustAddr(key)),
		},
		MaxRanges:       1,
		ConsiderIntents: false,
	}
	resp, pErr := client.SendWrapped(tc.Servers[0].GetDistSender(), nil, &rangeLookupReq)
	if pErr != nil {
		return roachpb.RangeDescriptor{}, errors.Errorf(
			"%q: lookup range unexpected error: %s", key, pErr)
	}
	return resp.(*roachpb.RangeLookupResponse).Ranges[0], nil
}
Example 15: lookupRange
func (c *cluster) lookupRange(nodeIdx int, key roachpb.Key) (*roachpb.RangeDescriptor, error) {
	req := &roachpb.RangeLookupRequest{
		Span: roachpb.Span{
			Key: keys.RangeMetaKey(keys.MustAddr(key)),
		},
		MaxRanges:       1,
		ConsiderIntents: false,
	}
	sender := c.clients[nodeIdx].GetSender()
	resp, pErr := client.SendWrapped(sender, nil, req)
	if pErr != nil {
		return nil, errors.Errorf("%s: lookup range: %s", key, pErr)
	}
	return &resp.(*roachpb.RangeLookupResponse).Ranges[0], nil
}