This article collects typical usage examples of the RKey type from the Golang package github.com/cockroachdb/cockroach/roachpb, answering the usual questions: what is RKey for, and how is it used? The curated examples below may help.
The following 15 code examples of the RKey type are drawn from open-source projects and ordered roughly by popularity.
Example 1: prev
// prev gives the right boundary of the union of all requests which don't
// affect keys larger than the given key.
// TODO(tschottdorf): again, better on BatchRequest itself, but can't pull
// 'keys' into 'roachpb'.
func prev(ba roachpb.BatchRequest, k roachpb.RKey) roachpb.RKey {
	candidate := roachpb.RKeyMin
	for _, union := range ba.Requests {
		h := union.GetInner().Header()
		addr := keys.Addr(h.Key)
		eAddr := keys.Addr(h.EndKey)
		if len(eAddr) == 0 {
			// Can probably avoid having to compute Next() here if
			// we're in the mood for some more complexity.
			eAddr = addr.Next()
		}
		if !eAddr.Less(k) {
			if !k.Less(addr) {
				// Range contains k, so won't be able to go lower.
				return k
			}
			// Range is disjoint from [KeyMin,k).
			continue
		}
		// We want the largest surviving candidate; since this request ends
		// below k, its right boundary is its end address.
		if candidate.Less(eAddr) {
			candidate = eAddr
		}
	}
	return candidate
}
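A minimal usage sketch of prev, assuming the historical roachpb.ScanRequest shape (a Span embedded the way RangeLookupRequest embeds one in Example 3 below); the function name and the keys "b", "d", "c", "f" are hypothetical:

func examplePrev() {
	var ba roachpb.BatchRequest
	ba.Add(&roachpb.ScanRequest{Span: roachpb.Span{
		Key:    roachpb.Key("b"),
		EndKey: roachpb.Key("d"),
	}})
	// The scan straddles "c", so prev cannot go below it and returns "c".
	_ = prev(ba, roachpb.RKey("c"))
	// The scan lies entirely below "f", so prev returns the scan's right
	// boundary "d" rather than "f" itself.
	_ = prev(ba, roachpb.RKey("f"))
}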
Example 2: next
// next gives the left boundary of the union of all requests which don't
// affect keys less than the given key.
// TODO(tschottdorf): again, better on BatchRequest itself, but can't pull
// 'keys' into 'proto'.
func next(ba roachpb.BatchRequest, k roachpb.RKey) (roachpb.RKey, error) {
	candidate := roachpb.RKeyMax
	for _, union := range ba.Requests {
		h := union.GetInner().Header()
		addr, err := keys.Addr(h.Key)
		if err != nil {
			return nil, err
		}
		if addr.Less(k) {
			eAddr, err := keys.AddrUpperBound(h.EndKey)
			if err != nil {
				return nil, err
			}
			if k.Less(eAddr) {
				// Starts below k, but continues beyond. Need to stay at k.
				return k, nil
			}
			// Affects only [KeyMin,k).
			continue
		}
		// We want the smallest of the surviving candidates.
		if addr.Less(candidate) {
			candidate = addr
		}
	}
	return candidate, nil
}
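Under the same assumptions as the prev sketch above (hypothetical keys, Span-embedded ScanRequest), next mirrors prev from the left:

func exampleNext() error {
	var ba roachpb.BatchRequest
	ba.Add(&roachpb.ScanRequest{Span: roachpb.Span{
		Key:    roachpb.Key("b"),
		EndKey: roachpb.Key("d"),
	}})
	// "c" falls inside the scan, so next must stay at "c".
	if _, err := next(ba, roachpb.RKey("c")); err != nil {
		return err
	}
	// The whole batch starts at or above "a", so next returns the smallest
	// start address, "b".
	_, err := next(ba, roachpb.RKey("a"))
	return err
}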
Example 3: RangeLookup
// RangeLookup dispatches a RangeLookup request for the given
// metadata key to the replicas of the given range. Note that we allow
// inconsistent reads when doing range lookups for efficiency. Getting
// stale data is not a correctness problem but instead may
// infrequently result in additional latency as additional range
// lookups may be required. Note also that rangeLookup bypasses the
// DistSender's Send() method, so there is no error inspection and
// retry logic here; this is not an issue since the lookup performs a
// single inconsistent read only.
func (ds *DistSender) RangeLookup(key roachpb.RKey, desc *roachpb.RangeDescriptor, considerIntents, useReverseScan bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
	ba := roachpb.BatchRequest{}
	ba.ReadConsistency = roachpb.INCONSISTENT
	ba.Add(&roachpb.RangeLookupRequest{
		Span: roachpb.Span{
			// We can interpret the RKey as a Key here since it's a metadata
			// lookup; those are never local.
			Key: key.AsRawKey(),
		},
		MaxRanges:       ds.rangeLookupMaxRanges,
		ConsiderIntents: considerIntents,
		Reverse:         useReverseScan,
	})
	replicas := newReplicaSlice(ds.gossip, desc)
	// TODO(tschottdorf) consider a Trace here, potentially that of the request
	// that had the cache miss and waits for the result.
	br, err := ds.sendRPC(nil /* Trace */, desc.RangeID, replicas, rpc.OrderRandom, ba)
	if err != nil {
		return nil, err
	}
	if br.Error != nil {
		return nil, br.Error
	}
	return br.Responses[0].GetInner().(*roachpb.RangeLookupResponse).Ranges, nil
}
Example 4: RangeLookup
// RangeLookup implements the RangeDescriptorDB interface.
// RangeLookup dispatches a RangeLookup request for the given metadata
// key to the replicas of the given range. Note that we allow
// inconsistent reads when doing range lookups for efficiency. Getting
// stale data is not a correctness problem but instead may
// infrequently result in additional latency as additional range
// lookups may be required. Note also that rangeLookup bypasses the
// DistSender's Send() method, so there is no error inspection and
// retry logic here; this is not an issue since the lookup performs a
// single inconsistent read only.
func (ds *DistSender) RangeLookup(
	key roachpb.RKey, desc *roachpb.RangeDescriptor, considerIntents, useReverseScan bool,
) ([]roachpb.RangeDescriptor, []roachpb.RangeDescriptor, *roachpb.Error) {
	ba := roachpb.BatchRequest{}
	ba.ReadConsistency = roachpb.INCONSISTENT
	ba.Add(&roachpb.RangeLookupRequest{
		Span: roachpb.Span{
			// We can interpret the RKey as a Key here since it's a metadata
			// lookup; those are never local.
			Key: key.AsRawKey(),
		},
		MaxRanges:       ds.rangeLookupMaxRanges,
		ConsiderIntents: considerIntents,
		Reverse:         useReverseScan,
	})
	replicas := newReplicaSlice(ds.gossip, desc)
	replicas.Shuffle()
	// TODO(tschottdorf): Ideally we would use the trace of the request which
	// caused this lookup.
	_ = context.TODO()
	br, err := ds.sendRPC(ds.Ctx, desc.RangeID, replicas, ba)
	if err != nil {
		return nil, nil, roachpb.NewError(err)
	}
	if br.Error != nil {
		return nil, nil, br.Error
	}
	resp := br.Responses[0].GetInner().(*roachpb.RangeLookupResponse)
	return resp.Ranges, resp.PrefetchedRanges, nil
}
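A hedged sketch of how a caller might consume this newer signature; lookupSketch and its error handling are hypothetical, and a real caller would feed the prefetched descriptors back into the range descriptor cache:

func lookupSketch(ds *DistSender, metaKey roachpb.RKey, seed *roachpb.RangeDescriptor) (*roachpb.RangeDescriptor, *roachpb.Error) {
	descs, prefetched, pErr := ds.RangeLookup(metaKey, seed, false /* considerIntents */, false /* useReverseScan */)
	if pErr != nil {
		return nil, pErr
	}
	_ = prefetched // would typically be inserted into the descriptor cache
	if len(descs) == 0 {
		return nil, roachpb.NewErrorf("no descriptor found for %s", metaKey)
	}
	return &descs[0], nil
}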
Example 5: prev
// prev gives the right boundary of the union of all requests which don't
// affect keys larger than the given key.
// TODO(tschottdorf): again, better on BatchRequest itself, but can't pull
// 'keys' into 'roachpb'.
func prev(ba roachpb.BatchRequest, k roachpb.RKey) (roachpb.RKey, error) {
	candidate := roachpb.RKeyMin
	for _, union := range ba.Requests {
		h := union.GetInner().Header()
		addr, err := keys.Addr(h.Key)
		if err != nil {
			return nil, err
		}
		eAddr, err := keys.AddrUpperBound(h.EndKey)
		if err != nil {
			return nil, err
		}
		if len(eAddr) == 0 {
			eAddr = addr.Next()
		}
		if !eAddr.Less(k) {
			if !k.Less(addr) {
				// Range contains k, so won't be able to go lower.
				return k, nil
			}
			// Range is disjoint from [KeyMin,k).
			continue
		}
		// We want the largest surviving candidate; since this request ends
		// below k, its right boundary is its end address.
		if candidate.Less(eAddr) {
			candidate = eAddr
		}
	}
	return candidate, nil
}
Example 6: getCachedRangeDescriptorLocked
// getCachedRangeDescriptorLocked is a helper function to retrieve the
// descriptor of the range which contains the given key, if present in the
// cache. It is assumed that the caller holds a read lock on rdc.rangeCache.
func (rdc *rangeDescriptorCache) getCachedRangeDescriptorLocked(key roachpb.RKey, inclusive bool) (rangeCacheKey, *roachpb.RangeDescriptor, error) {
	// The cache is indexed using the end-key of the range, but the
	// end-key is non-inclusive by default.
	var metaKey roachpb.RKey
	var err error
	if !inclusive {
		metaKey, err = meta(key.Next())
	} else {
		metaKey, err = meta(key)
	}
	if err != nil {
		return nil, nil, err
	}
	k, v, ok := rdc.rangeCache.cache.Ceil(rangeCacheKey(metaKey))
	if !ok {
		return nil, nil, nil
	}
	metaEndKey := k.(rangeCacheKey)
	rd := v.(*roachpb.RangeDescriptor)
	// Return nil if the key does not belong to the range.
	if (!inclusive && !rd.ContainsKey(key)) || (inclusive && !rd.ContainsExclusiveEndKey(key)) {
		return nil, nil, nil
	}
	return metaEndKey, rd, nil
}
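The meta(key.Next()) step deserves a worked example: because the cache is keyed on each range's end key, resolving key "b" against a cached descriptor covering ["a","c") must Ceil-search with a meta key strictly greater than meta("b"), which key.Next() provides. A hedged sketch of a caller (the function name is hypothetical, and the locking is shown only in comments since the exact lock field is an assumption):

func cachedLookupSketch(rdc *rangeDescriptorCache, key roachpb.RKey) (*roachpb.RangeDescriptor, error) {
	// A real caller must hold a read lock on rdc.rangeCache first, per the
	// contract documented above, e.g. something like:
	//   rdc.rangeCache.RLock(); defer rdc.rangeCache.RUnlock()
	_, desc, err := rdc.getCachedRangeDescriptorLocked(key, false /* inclusive */)
	return desc, err
}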
Example 7: getDescriptors
// getDescriptors looks up the range descriptor to use for a query over the
// key range [from,to), with the given lookupOptions. The range descriptor
// which contains the range in which the request should start its query is
// returned first; the returned bool is true in case the given range reaches
// outside the first descriptor.
// In case either of the descriptors is discovered stale, the returned closure
// should be called; it evicts the cache appropriately.
// Note that `from` and `to` are not necessarily Key and EndKey from a
// RequestHeader; it's assumed that they've been translated to key addresses
// already (via KeyAddress).
func (ds *DistSender) getDescriptors(from, to roachpb.RKey, options lookupOptions) (*roachpb.RangeDescriptor, bool, func(), *roachpb.Error) {
	var desc *roachpb.RangeDescriptor
	var err error
	var descKey roachpb.RKey
	if !options.useReverseScan {
		descKey = from
	} else {
		descKey = to
	}
	desc, err = ds.rangeCache.LookupRangeDescriptor(descKey, options)
	if err != nil {
		return nil, false, nil, roachpb.NewError(err)
	}
	// Checks whether the request also needs the next range descriptor;
	// if so, returns true.
	needAnother := func(desc *roachpb.RangeDescriptor, isReverse bool) bool {
		if isReverse {
			return from.Less(desc.StartKey)
		}
		return desc.EndKey.Less(to)
	}
	evict := func() {
		ds.rangeCache.EvictCachedRangeDescriptor(descKey, desc, options.useReverseScan)
	}
	return desc, needAnother(desc, options.useReverseScan), evict, nil
}
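A hedged sketch of the chunking loop a sender might build on top of getDescriptors: walk descriptors from `from` until the span [from,to) is covered. The function name is hypothetical, and retry/eviction handling is deliberately omitted:

func walkRangeSketch(ds *DistSender, from, to roachpb.RKey, opts lookupOptions) *roachpb.Error {
	for {
		desc, needAnother, evict, pErr := ds.getDescriptors(from, to, opts)
		if pErr != nil {
			return pErr
		}
		_ = evict // a real caller invokes this if the descriptor proves stale
		if !needAnother {
			return nil
		}
		from = desc.EndKey // continue in the next range
	}
}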
Example 8: RangeLookup
// RangeLookup dispatches a RangeLookup request for the given
// metadata key to the replicas of the given range. Note that we allow
// inconsistent reads when doing range lookups for efficiency. Getting
// stale data is not a correctness problem but instead may
// infrequently result in additional latency as additional range
// lookups may be required. Note also that rangeLookup bypasses the
// DistSender's Send() method, so there is no error inspection and
// retry logic here; this is not an issue since the lookup performs a
// single inconsistent read only.
func (ds *DistSender) RangeLookup(key roachpb.RKey, desc *roachpb.RangeDescriptor, considerIntents, useReverseScan bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
	ba := roachpb.BatchRequest{}
	ba.ReadConsistency = roachpb.INCONSISTENT
	ba.Add(&roachpb.RangeLookupRequest{
		Span: roachpb.Span{
			// We can interpret the RKey as a Key here since it's a metadata
			// lookup; those are never local.
			Key: key.AsRawKey(),
		},
		MaxRanges:       ds.rangeLookupMaxRanges,
		ConsiderIntents: considerIntents,
		Reverse:         useReverseScan,
	})
	replicas := newReplicaSlice(ds.gossip, desc)
	trace := ds.Tracer.StartSpan("range lookup")
	defer trace.Finish()
	// TODO(tschottdorf): Ideally we would use the trace of the request which
	// caused this lookup instead of a new one.
	br, err := ds.sendRPC(trace, desc.RangeID, replicas, orderRandom, ba)
	if err != nil {
		return nil, err
	}
	if br.Error != nil {
		return nil, br.Error
	}
	return br.Responses[0].GetInner().(*roachpb.RangeLookupResponse).Ranges, nil
}
Example 9: lookupReplica
// lookupReplica looks up replica by key [range]. Lookups are done
// by consulting each store in turn via Store.LookupRange(key).
// Returns RangeID and replica on success; RangeKeyMismatch error
// if not found.
// This is only for testing usage; performance doesn't matter.
func (ls *Stores) lookupReplica(start, end roachpb.RKey) (rangeID roachpb.RangeID, replica *roachpb.ReplicaDescriptor, pErr *roachpb.Error) {
	ls.mu.RLock()
	defer ls.mu.RUnlock()
	var rng *Replica
	for _, store := range ls.storeMap {
		rng = store.LookupReplica(start, end)
		if rng == nil {
			if tmpRng := store.LookupReplica(start, nil); tmpRng != nil {
				// Pass the format string and args directly; wrapping them in
				// fmt.Sprintf is redundant (and unsafe if the output contains %).
				log.Warningf("range not contained in one range: [%s,%s), but have [%s,%s)",
					start, end, tmpRng.Desc().StartKey, tmpRng.Desc().EndKey)
			}
			continue
		}
		if replica == nil {
			rangeID = rng.RangeID
			replica = rng.GetReplica()
			continue
		}
		// Should never happen outside of tests.
		return 0, nil, roachpb.NewErrorf(
			"range %+v exists on additional store: %+v", rng, store)
	}
	if replica == nil {
		pErr = roachpb.NewError(roachpb.NewRangeKeyMismatchError(start.AsRawKey(), end.AsRawKey(), nil))
	}
	return rangeID, replica, pErr
}
Example 10: clearOverlappingCachedRangeDescriptors
// clearOverlappingCachedRangeDescriptors looks up and clears any
// cache entries which overlap the specified key or descriptor.
func (rdc *rangeDescriptorCache) clearOverlappingCachedRangeDescriptors(key, metaKey roachpb.RKey, desc *roachpb.RangeDescriptor) {
	if desc.StartKey.Equal(desc.EndKey) { // True for some unittests.
		return
	}
	// Clear out any descriptors which subsume the key which we're going
	// to cache. For example, an existing KeyMin->KeyMax descriptor
	// should be cleared out in favor of a KeyMin->"m" descriptor.
	k, v, ok := rdc.rangeCache.Ceil(rangeCacheKey(metaKey))
	if ok {
		descriptor := v.(*roachpb.RangeDescriptor)
		if !key.Less(descriptor.StartKey) && !descriptor.EndKey.Less(key) {
			if log.V(1) {
				log.Infof("clearing overlapping descriptor: key=%s desc=%s", k, descriptor)
			}
			rdc.rangeCache.Del(k.(rangeCacheKey))
		}
	}
	// Also clear any descriptors which are subsumed by the one we're
	// going to cache. This could happen on a merge (and also happens
	// when there's a lot of concurrency). Iterate from the range meta key
	// after RangeMetaKey(desc.StartKey) to the range meta key for desc.EndKey.
	rdc.rangeCache.DoRange(func(k, v interface{}) {
		if log.V(1) {
			log.Infof("clearing subsumed descriptor: key=%s desc=%s", k, v.(*roachpb.RangeDescriptor))
		}
		rdc.rangeCache.Del(k.(rangeCacheKey))
	}, rangeCacheKey(meta(desc.StartKey).Next()),
		rangeCacheKey(meta(desc.EndKey)))
}
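For context, a hypothetical sketch of the insertion path that would call this clearing helper before caching a fresh descriptor. The function name, the exact key arguments, and the Add call on the ordered cache are assumptions based on this version's cache layout:

func insertDescriptorSketch(rdc *rangeDescriptorCache, desc *roachpb.RangeDescriptor) {
	// Descriptors are cached under the meta key of their end key, which is
	// what makes the Ceil-based lookups above work; `meta` is the same
	// helper used in the clearing code.
	metaKey := meta(desc.EndKey)
	rdc.clearOverlappingCachedRangeDescriptors(desc.EndKey, metaKey, desc)
	rdc.rangeCache.Add(rangeCacheKey(metaKey), desc)
}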
Example 11: lookupReplica
// lookupReplica looks up replica by key [range]. Lookups are done
// by consulting each store in turn via Store.LookupRange(key).
// Returns RangeID and replica on success; RangeKeyMismatch error
// if not found.
// This is only for testing usage; performance doesn't matter.
func (ls *Stores) lookupReplica(start, end roachpb.RKey) (rangeID roachpb.RangeID, replica *roachpb.ReplicaDescriptor, err error) {
	ls.mu.RLock()
	defer ls.mu.RUnlock()
	var rng *Replica
	var partialDesc *roachpb.RangeDescriptor
	for _, store := range ls.storeMap {
		rng = store.LookupReplica(start, end)
		if rng == nil {
			if tmpRng := store.LookupReplica(start, nil); tmpRng != nil {
				log.Warningf("range not contained in one range: [%s,%s), but have [%s,%s)",
					start, end, tmpRng.Desc().StartKey, tmpRng.Desc().EndKey)
				partialDesc = tmpRng.Desc()
				break
			}
			continue
		}
		if replica == nil {
			rangeID = rng.RangeID
			replica, err = rng.GetReplica()
			if err != nil {
				if _, ok := err.(*errReplicaNotInRange); !ok {
					return 0, nil, err
				}
			}
			continue
		}
		// Should never happen outside of tests.
		return 0, nil, util.Errorf(
			"range %+v exists on additional store: %+v", rng, store)
	}
	if replica == nil {
		err = roachpb.NewRangeKeyMismatchError(start.AsRawKey(), end.AsRawKey(), partialDesc)
	}
	return rangeID, replica, err
}
Example 12: getNode
func getNode(t *testing.T, nodes map[string]storage.RangeTreeNode, testName string, key roachpb.RKey) (storage.RangeTreeNode, bool) {
	if key != nil {
		if node, ok := nodes[key.String()]; !ok {
			t.Errorf("%s: could not locate node with key %s", testName, key)
		} else {
			return node, ok
		}
	}
	return storage.RangeTreeNode{}, false
}
Example 13: ComputeSplitKeys
// ComputeSplitKeys takes a start and end key and returns an array of keys
// at which to split the span [start, end).
// The only required splits are at each user table prefix.
func (s SystemConfig) ComputeSplitKeys(startKey, endKey roachpb.RKey) []roachpb.RKey {
	testingLock.Lock()
	tableSplitsDisabled := testingDisableTableSplits
	testingLock.Unlock()
	if tableSplitsDisabled {
		return nil
	}

	tableStart := roachpb.RKey(keys.UserTableDataMin)
	if !tableStart.Less(endKey) {
		// This range is before the user tables span: no required splits.
		return nil
	}

	startID, ok := ObjectIDForKey(startKey)
	if !ok || startID <= keys.MaxReservedDescID {
		// The start key is either:
		// - not part of the structured data span
		// - part of the system span
		// In either case, start looking for splits at the first ID usable
		// by the user data span.
		startID = keys.MaxReservedDescID + 1
	} else {
		// The start key is either already a split key, or after the split
		// key for its ID. We can skip straight to the next one.
		startID++
	}

	// Find the largest object ID.
	// We can't keep splitting until we reach endKey as it could be roachpb.KeyMax.
	endID, err := s.GetLargestObjectID()
	if err != nil {
		log.Errorf("unable to determine largest object ID from system config: %s", err)
		return nil
	}

	// Build key prefixes for sequential table IDs until we reach endKey.
	var splitKeys []roachpb.RKey
	var key roachpb.RKey
	// endID could be smaller than startID if we don't have user tables.
	for id := startID; id <= endID; id++ {
		key = keys.MakeTablePrefix(id)
		// Skip if the range starts on a split key.
		if !startKey.Less(key) {
			continue
		}
		// Handle the case where EndKey is already a table prefix.
		if !key.Less(endKey) {
			break
		}
		splitKeys = append(splitKeys, key)
	}
	return splitKeys
}
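A worked sketch (the helper name is hypothetical): spanning the entire keyspace forces startID to keys.MaxReservedDescID+1, so the result is one split key per user-table prefix up to the largest object ID in the config:

func splitKeysSketch(cfg SystemConfig) []roachpb.RKey {
	// ObjectIDForKey fails on RKeyMin (it is not in the structured span),
	// so the scan starts at the first user-table ID; every table prefix up
	// to cfg.GetLargestObjectID() then becomes a required split key.
	return cfg.ComputeSplitKeys(roachpb.RKeyMin, roachpb.RKeyMax)
}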
Example 14: ObjectIDForKey
// ObjectIDForKey returns the object ID (table or database) for 'key',
// or (_, false) if not within the structured key space.
func ObjectIDForKey(key roachpb.RKey) (uint32, bool) {
	if key.Equal(roachpb.RKeyMax) {
		return 0, false
	}
	if encoding.PeekType(key) != encoding.Int {
		// TODO(marc): this should eventually return SystemDatabaseID.
		return 0, false
	}
	// Consume first encoded int.
	_, id64, err := encoding.DecodeUvarint(key)
	return uint32(id64), err == nil
}
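A round-trip sketch: keys.MakeTablePrefix (the same helper ComputeSplitKeys uses above) builds a table prefix, and ObjectIDForKey recovers the ID. The function name and the table ID 51 are hypothetical:

func objectIDSketch() (uint32, bool) {
	// The prefix starts with a varint-encoded ID, which is exactly what
	// PeekType/DecodeUvarint expect.
	key := roachpb.RKey(keys.MakeTablePrefix(51))
	return ObjectIDForKey(key) // expected: (51, true)
}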
Example 15: LookupReplica
// LookupReplica looks up replica by key [range]. Lookups are done
// by consulting each store in turn via Store.LookupReplica(key).
// Returns RangeID and replica on success; RangeKeyMismatch error
// if not found.
// If end is nil, a replica containing start is looked up.
// This is only for testing usage; performance doesn't matter.
func (ls *Stores) LookupReplica(
	start, end roachpb.RKey,
) (roachpb.RangeID, roachpb.ReplicaDescriptor, error) {
	ls.mu.RLock()
	defer ls.mu.RUnlock()
	var rangeID roachpb.RangeID
	var repDesc roachpb.ReplicaDescriptor
	var repDescFound bool
	for _, store := range ls.storeMap {
		replica := store.LookupReplica(start, nil)
		if replica == nil {
			continue
		}
		// Verify that the descriptor contains the entire range.
		if desc := replica.Desc(); !desc.ContainsKeyRange(start, end) {
			log.Warningf(context.TODO(), "range not contained in one range: [%s,%s), but have [%s,%s)",
				start, end, desc.StartKey, desc.EndKey)
			err := roachpb.NewRangeKeyMismatchError(start.AsRawKey(), end.AsRawKey(), desc)
			return 0, roachpb.ReplicaDescriptor{}, err
		}
		rangeID = replica.RangeID
		var err error
		repDesc, err = replica.GetReplicaDescriptor()
		if err != nil {
			if _, ok := err.(*roachpb.RangeNotFoundError); ok {
				// We are not holding a lock across this block; the replica could have
				// been removed from the range (via down-replication) between the
				// LookupReplica and the GetReplicaDescriptor calls. In this case just
				// ignore this replica.
				continue
			}
			return 0, roachpb.ReplicaDescriptor{}, err
		}
		if repDescFound {
			// We already found the range; this should never happen outside of tests.
			err := errors.Errorf("range %+v exists on additional store: %+v", replica, store)
			return 0, roachpb.ReplicaDescriptor{}, err
		}
		repDescFound = true
	}
	if !repDescFound {
		return 0, roachpb.ReplicaDescriptor{}, roachpb.NewRangeNotFoundError(0)
	}
	return rangeID, repDesc, nil
}
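Finally, a test-style sketch matching the "testing only" contract in the comment above; the function name and the span ["a","b") are hypothetical:

func lookupReplicaSketch(ls *Stores) (roachpb.ReplicaDescriptor, error) {
	// Passing a non-nil end key makes the helper verify that a single
	// descriptor covers the whole span; per the doc comment, passing
	// end=nil would instead look up a replica containing just the start key.
	_, repDesc, err := ls.LookupReplica(roachpb.RKey("a"), roachpb.RKey("b"))
	return repDesc, err
}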