This page collects typical usage examples of the Golang function Addr from github.com/cockroachdb/cockroach/keys. If you are wondering what keys.Addr does, how to call it, or what idiomatic usage looks like, the curated examples below should help.
15 code examples of the Addr function are shown, ordered by popularity by default.
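Before the examples, a quick orientation: keys.Addr maps a key to its "address", i.e. the key under which it participates in range addressing. Ordinary keys address themselves, while store- and range-local keys (which carry a reserved prefix) resolve to the global key they are attached to. The following is a minimal, dependency-free sketch of that idea; the prefix bytes and function shape are illustrative assumptions, not CockroachDB's actual key layout.

// A minimal model of the addressing idea behind keys.Addr, assuming a
// single reserved marker for local keys. Illustrative only.
package main

import (
	"bytes"
	"fmt"
)

var localPrefix = []byte{0x00, 0x00, 0x00} // hypothetical local-key marker

// addr returns the addressable form of key: local keys are stripped of
// their prefix, ordinary keys address themselves.
func addr(key []byte) []byte {
	if bytes.HasPrefix(key, localPrefix) {
		return key[len(localPrefix):]
	}
	return key
}

func main() {
	fmt.Printf("%q\n", addr([]byte("a"))) // "a": ordinary key
	local := append(append([]byte{}, localPrefix...), "range-desc:a"...)
	fmt.Printf("%q\n", addr(local)) // "range-desc:a": local key resolved
}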
Example 1: prev
// prev gives the right boundary of the union of all requests which don't
// affect keys larger than the given key.
// TODO(tschottdorf): again, better on BatchRequest itself, but can't pull
// 'keys' into 'roachpb'.
func prev(ba roachpb.BatchRequest, k roachpb.RKey) roachpb.RKey {
	candidate := roachpb.RKeyMin
	for _, union := range ba.Requests {
		h := union.GetInner().Header()
		addr := keys.Addr(h.Key)
		eAddr := keys.Addr(h.EndKey)
		if len(eAddr) == 0 {
			// Can probably avoid having to compute Next() here if
			// we're in the mood for some more complexity.
			eAddr = addr.Next()
		}
		if !eAddr.Less(k) {
			if !k.Less(addr) {
				// Range contains k, so won't be able to go lower.
				return k
			}
			// Range is disjoint from [KeyMin,k).
			continue
		}
		// We want the largest surviving candidate.
		if candidate.Less(addr) {
			candidate = addr
		}
	}
	return candidate
}
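To make the boundary logic concrete, here is a self-contained, dependency-free re-implementation of the same algorithm over plain byte-slice keys. The span type and helper names are hypothetical stand-ins for the roachpb types; only the control flow mirrors prev above.

package main

import (
	"bytes"
	"fmt"
)

type span struct{ key, endKey []byte }

// next returns the smallest key greater than k (k + "\x00"),
// mirroring RKey.Next().
func next(k []byte) []byte { return append(append([]byte{}, k...), 0) }

// prevBoundary mirrors prev() above: among all spans that affect only
// keys below k, return the largest start key; if any span contains k,
// return k itself.
func prevBoundary(spans []span, k []byte) []byte {
	candidate := []byte{} // stand-in for KeyMin
	for _, s := range spans {
		eKey := s.endKey
		if len(eKey) == 0 {
			// Point request: treat as [key, key.Next()).
			eKey = next(s.key)
		}
		if bytes.Compare(eKey, k) >= 0 { // !eAddr.Less(k)
			if bytes.Compare(k, s.key) >= 0 { // !k.Less(addr)
				return k // span contains k
			}
			continue // span lies entirely above k
		}
		if bytes.Compare(candidate, s.key) < 0 {
			candidate = s.key
		}
	}
	return candidate
}

func main() {
	spans := []span{
		{key: []byte("a"), endKey: []byte("c")},
		{key: []byte("e")}, // point request
	}
	fmt.Printf("%q\n", prevBoundary(spans, []byte("d"))) // "a"
	fmt.Printf("%q\n", prevBoundary(spans, []byte("b"))) // "b": [a,c) contains b
}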
Example 2: TestReplicateRange
// TestReplicateRange verifies basic replication functionality by creating two stores
// and a range, replicating the range to the second store, and reading its data there.
func TestReplicateRange(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()
	// Issue a command on the first node before replicating.
	incArgs := incrementArgs([]byte("a"), 5)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	rng, err := mtc.stores[0].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}
	if err := rng.ChangeReplicas(roachpb.ADD_REPLICA,
		roachpb.ReplicaDescriptor{
			NodeID:  mtc.stores[1].Ident.NodeID,
			StoreID: mtc.stores[1].Ident.StoreID,
		}, rng.Desc()); err != nil {
		t.Fatal(err)
	}
	// Verify no intent remains on range descriptor key.
	key := keys.RangeDescriptorKey(rng.Desc().StartKey)
	desc := roachpb.RangeDescriptor{}
	if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &desc); !ok || err != nil {
		t.Fatalf("fetching range descriptor yielded %t, %s", ok, err)
	}
	// Verify that in time, no intents remain on meta addressing
	// keys, and that range descriptor on the meta records is correct.
	util.SucceedsWithin(t, 1*time.Second, func() error {
		meta2 := keys.Addr(keys.RangeMetaKey(roachpb.RKeyMax))
		meta1 := keys.Addr(keys.RangeMetaKey(meta2))
		for _, key := range []roachpb.RKey{meta2, meta1} {
			metaDesc := roachpb.RangeDescriptor{}
			if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key.AsRawKey(), mtc.stores[0].Clock().Now(), true, nil, &metaDesc); !ok || err != nil {
				return util.Errorf("failed to resolve %s", key.AsRawKey())
			}
			if !reflect.DeepEqual(metaDesc, desc) {
				return util.Errorf("descs not equal: %+v != %+v", metaDesc, desc)
			}
		}
		return nil
	})
	// Verify that the same data is available on the replica.
	util.SucceedsWithin(t, replicaReadTimeout, func() error {
		getArgs := getArgs([]byte("a"))
		if reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, &getArgs); err != nil {
			return util.Errorf("failed to read data: %s", err)
		} else if e, v := int64(5), mustGetInt(reply.(*roachpb.GetResponse).Value); v != e {
			return util.Errorf("failed to read correct data: expected %d, got %d", e, v)
		}
		return nil
	})
}
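The util.SucceedsWithin helper used twice above retries a closure until it returns nil or a deadline passes. A minimal stand-in for readers who want to run similar assertions outside the cockroach tree; the function name and 10ms poll interval are assumptions, not the util package's actual implementation.

package main

import (
	"errors"
	"fmt"
	"time"
)

// succeedsWithin retries fn until it returns nil or the deadline
// expires, returning the last error on timeout.
func succeedsWithin(d time.Duration, fn func() error) error {
	deadline := time.Now().Add(d)
	for {
		err := fn()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return err
		}
		time.Sleep(10 * time.Millisecond)
	}
}

func main() {
	tries := 0
	err := succeedsWithin(time.Second, func() error {
		tries++
		if tries < 3 {
			return errors.New("not yet")
		}
		return nil
	})
	fmt.Println(err, tries) // <nil> 3
}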
Example 3: metaKey
func metaKey(key roachpb.RKey) []byte {
	rk, err := keys.Addr(keys.RangeMetaKey(key))
	if err != nil {
		panic(err)
	}
	return rk
}
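metaKey combines two steps that recur throughout these examples: keys.RangeMetaKey maps an ordinary key into the meta addressing index, and keys.Addr turns the result into an RKey. A dependency-free sketch of the mapping; the prefix bytes here are a placeholder, not CockroachDB's real layout.

package main

import "fmt"

var meta2Prefix = []byte("\x00\x00meta2") // placeholder prefix

// rangeMetaKey maps an ordinary key to the meta2 index entry that
// would address the range containing it.
func rangeMetaKey(key []byte) []byte {
	return append(append([]byte{}, meta2Prefix...), key...)
}

func main() {
	fmt.Printf("%q\n", rangeMetaKey([]byte("a"))) // "\x00\x00meta2a"
}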
Example 4: prev
// prev gives the right boundary of the union of all requests which don't
// affect keys larger than the given key.
// TODO(tschottdorf): again, better on BatchRequest itself, but can't pull
// 'keys' into 'roachpb'.
func prev(ba roachpb.BatchRequest, k roachpb.RKey) (roachpb.RKey, error) {
	candidate := roachpb.RKeyMin
	for _, union := range ba.Requests {
		h := union.GetInner().Header()
		addr, err := keys.Addr(h.Key)
		if err != nil {
			return nil, err
		}
		eAddr, err := keys.AddrUpperBound(h.EndKey)
		if err != nil {
			return nil, err
		}
		if len(eAddr) == 0 {
			eAddr = addr.Next()
		}
		if !eAddr.Less(k) {
			if !k.Less(addr) {
				// Range contains k, so won't be able to go lower.
				return k, nil
			}
			// Range is disjoint from [KeyMin,k).
			continue
		}
		// We want the largest surviving candidate.
		if candidate.Less(addr) {
			candidate = addr
		}
	}
	return candidate, nil
}
Example 5: next
// next gives the left boundary of the union of all requests which don't
// affect keys less than the given key.
// TODO(tschottdorf): again, better on BatchRequest itself, but can't pull
// 'keys' into 'proto'.
func next(ba roachpb.BatchRequest, k roachpb.RKey) (roachpb.RKey, error) {
	candidate := roachpb.RKeyMax
	for _, union := range ba.Requests {
		h := union.GetInner().Header()
		addr, err := keys.Addr(h.Key)
		if err != nil {
			return nil, err
		}
		if addr.Less(k) {
			eAddr, err := keys.AddrUpperBound(h.EndKey)
			if err != nil {
				return nil, err
			}
			if k.Less(eAddr) {
				// Starts below k, but continues beyond. Need to stay at k.
				return k, nil
			}
			// Affects only [KeyMin,k).
			continue
		}
		// We want the smallest of the surviving candidates.
		if addr.Less(candidate) {
			candidate = addr
		}
	}
	return candidate, nil
}
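And the mirror image of the prev sketch after Example 1: a dependency-free version of next, which finds the left boundary of all requests touching keys at or above k. Again, the span type is a hypothetical stand-in for the roachpb types.

package main

import (
	"bytes"
	"fmt"
)

type span struct{ key, endKey []byte }

// nextBoundary mirrors next() above: spans entirely below k are
// ignored; a span that straddles k pins the boundary at k; otherwise
// the smallest start key at or above k wins.
func nextBoundary(spans []span, k []byte) []byte {
	candidate := []byte("\xff\xff") // stand-in for KeyMax
	for _, s := range spans {
		if bytes.Compare(s.key, k) < 0 { // addr.Less(k)
			if len(s.endKey) > 0 && bytes.Compare(k, s.endKey) < 0 {
				// Starts below k, but continues beyond. Need to stay at k.
				return k
			}
			// Affects only [KeyMin, k).
			continue
		}
		if bytes.Compare(s.key, candidate) < 0 {
			candidate = s.key
		}
	}
	return candidate
}

func main() {
	spans := []span{
		{key: []byte("a"), endKey: []byte("c")},
		{key: []byte("e"), endKey: []byte("f")},
	}
	fmt.Printf("%q\n", nextBoundary(spans, []byte("d"))) // "e"
	fmt.Printf("%q\n", nextBoundary(spans, []byte("b"))) // "b": [a,c) straddles b
}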
Example 6: checkEndTransactionTrigger
// checkEndTransactionTrigger verifies that an EndTransactionRequest
// that includes intents for the SystemDB keys sets the proper trigger.
func checkEndTransactionTrigger(req roachpb.Request, _ roachpb.Header) error {
	args, ok := req.(*roachpb.EndTransactionRequest)
	if !ok {
		return nil
	}
	if !args.Commit {
		// This is a rollback: skip trigger verification.
		return nil
	}
	modifiedSpanTrigger := args.InternalCommitTrigger.GetModifiedSpanTrigger()
	modifiedSystemSpan := modifiedSpanTrigger != nil && modifiedSpanTrigger.SystemDBSpan
	var hasSystemKey bool
	for _, span := range args.IntentSpans {
		addr := keys.Addr(span.Key)
		if bytes.Compare(addr, keys.SystemDBSpan.Key) >= 0 && bytes.Compare(addr, keys.SystemDBSpan.EndKey) < 0 {
			hasSystemKey = true
			break
		}
	}
	if hasSystemKey != modifiedSystemSpan {
		return util.Errorf("EndTransaction hasSystemKey=%t, but hasSystemDBTrigger=%t",
			hasSystemKey, modifiedSystemSpan)
	}
	return nil
}
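The loop above is a half-open interval membership test written out with bytes.Compare. In isolation (names are illustrative):

package main

import (
	"bytes"
	"fmt"
)

// inSpan reports whether key falls inside [start, end).
func inSpan(key, start, end []byte) bool {
	return bytes.Compare(key, start) >= 0 && bytes.Compare(key, end) < 0
}

func main() {
	fmt.Println(inSpan([]byte("b"), []byte("a"), []byte("c"))) // true
	fmt.Println(inSpan([]byte("c"), []byte("a"), []byte("c"))) // false: end is exclusive
}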
Example 7: TestKeyAddress
func TestKeyAddress(t *testing.T) {
	defer leaktest.AfterTest(t)()
	testCases := []struct {
		key roachpb.Key
	}{
		{MakeNameMetadataKey(0, "BAR")},
		{MakeNameMetadataKey(1, "BAR")},
		{MakeNameMetadataKey(1, "foo")},
		{MakeNameMetadataKey(2, "foo")},
		{MakeDescMetadataKey(123)},
		{MakeDescMetadataKey(124)},
	}
	var lastKey roachpb.Key
	for i, test := range testCases {
		resultAddr, err := keys.Addr(test.key)
		if err != nil {
			t.Fatal(err)
		}
		result := resultAddr.AsRawKey()
		if result.Compare(lastKey) <= 0 {
			t.Errorf("%d: key address %q is <= %q", i, result, lastKey)
		}
		lastKey = result
	}
}
Example 8: TestObjectIDForKey
func TestObjectIDForKey(t *testing.T) {
	defer leaktest.AfterTest(t)
	testCases := []struct {
		key     roachpb.RKey
		success bool
		id      uint32
	}{
		// Before the structured span.
		{roachpb.RKeyMin, false, 0},
		{keys.Addr(keys.SystemMax), false, 0},
		// Boundaries of structured span.
		{keys.Addr(keys.TableDataPrefix), false, 0},
		{roachpb.RKeyMax, false, 0},
		// In system span, but no Uvarint ID.
		{keys.MakeKey(keys.TableDataPrefix, roachpb.RKey("foo")), false, 0},
		// Valid, even if there are things after the ID.
		{keys.MakeKey(keys.MakeTablePrefix(42), roachpb.RKey("foo")), true, 42},
		{keys.MakeTablePrefix(0), true, 0},
		{keys.MakeTablePrefix(999), true, 999},
	}
	for tcNum, tc := range testCases {
		id, success := config.ObjectIDForKey(tc.key)
		if success != tc.success {
			t.Errorf("#%d: expected success=%t", tcNum, tc.success)
			continue
		}
		if id != tc.id {
			t.Errorf("#%d: expected id=%d, got %d", tcNum, tc.id, id)
		}
	}
}
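A rough, self-contained model of what ObjectIDForKey is being tested for here: check the table-data prefix, then decode a varint table ID from the remainder. Note that CockroachDB uses its own order-preserving Uvarint encoding; encoding/binary's Uvarint and the prefix byte are substituted below purely to keep the sketch runnable.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

var tablePrefix = []byte{0xff} // placeholder for keys.TableDataPrefix

// objectIDForKey extracts a table ID from a key, reporting failure for
// keys outside the table-data span or without a decodable ID.
func objectIDForKey(key []byte) (uint32, bool) {
	if !bytes.HasPrefix(key, tablePrefix) {
		return 0, false
	}
	id, n := binary.Uvarint(key[len(tablePrefix):])
	if n <= 0 {
		return 0, false
	}
	return uint32(id), true
}

func main() {
	key := append(append([]byte{}, tablePrefix...), byte(42)) // table 42...
	key = append(key, []byte("foo")...)                       // ...with trailing bytes
	fmt.Println(objectIDForKey(key)) // 42 true
}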
Example 9: SplitRange
// SplitRange splits the range containing splitKey.
// The right range created by the split starts at the split key and extends to the
// original range's end key.
// Returns the new descriptors of the left and right ranges.
//
// splitKey must correspond to a SQL table key (it must end with a family ID /
// col ID).
func (tc *TestCluster) SplitRange(
	splitKey roachpb.Key,
) (*roachpb.RangeDescriptor, *roachpb.RangeDescriptor, error) {
	splitRKey, err := keys.Addr(splitKey)
	if err != nil {
		return nil, nil, err
	}
	origRangeDesc, err := tc.LookupRange(splitKey)
	if err != nil {
		return nil, nil, err
	}
	if origRangeDesc.StartKey.Equal(splitRKey) {
		return nil, nil, errors.Errorf(
			"cannot split range %+v at start key %q", origRangeDesc, splitKey)
	}
	splitReq := roachpb.AdminSplitRequest{
		Span: roachpb.Span{
			Key: splitKey,
		},
		SplitKey: splitKey,
	}
	_, pErr := client.SendWrapped(tc.Servers[0].GetDistSender(), nil, &splitReq)
	if pErr != nil {
		return nil, nil, errors.Errorf(
			"%q: split unexpected error: %s", splitReq.SplitKey, pErr)
	}
	leftRangeDesc := new(roachpb.RangeDescriptor)
	rightRangeDesc := new(roachpb.RangeDescriptor)
	if err := tc.Servers[0].DB().GetProto(
		keys.RangeDescriptorKey(origRangeDesc.StartKey), leftRangeDesc); err != nil {
		return nil, nil, errors.Wrap(err, "could not look up left-hand side descriptor")
	}
	// The split point might not be exactly the one we requested (it can be
	// adjusted slightly so we don't split in the middle of SQL rows). Update it
	// to the real point.
	splitRKey = leftRangeDesc.EndKey
	if err := tc.Servers[0].DB().GetProto(
		keys.RangeDescriptorKey(splitRKey), rightRangeDesc); err != nil {
		return nil, nil, errors.Wrap(err, "could not look up right-hand side descriptor")
	}
	return leftRangeDesc, rightRangeDesc, nil
}
Example 10: checkEndTransactionTrigger
// checkEndTransactionTrigger verifies that an EndTransactionRequest
// that includes intents for the SystemDB keys sets the proper trigger.
func checkEndTransactionTrigger(args storagebase.FilterArgs) *roachpb.Error {
	req, ok := args.Req.(*roachpb.EndTransactionRequest)
	if !ok {
		return nil
	}
	if !req.Commit {
		// This is a rollback: skip trigger verification.
		return nil
	}
	modifiedSpanTrigger := req.InternalCommitTrigger.GetModifiedSpanTrigger()
	modifiedSystemConfigSpan := modifiedSpanTrigger != nil && modifiedSpanTrigger.SystemConfigSpan
	var hasSystemKey bool
	for _, span := range req.IntentSpans {
		keyAddr, err := keys.Addr(span.Key)
		if err != nil {
			return roachpb.NewError(err)
		}
		if bytes.Compare(keyAddr, keys.SystemConfigSpan.Key) >= 0 &&
			bytes.Compare(keyAddr, keys.SystemConfigSpan.EndKey) < 0 {
			hasSystemKey = true
			break
		}
	}
	// If the transaction in question has intents in the system span, then
	// modifiedSystemConfigSpan should always be true. However, it is possible
	// for modifiedSystemConfigSpan to be set, even though no system keys are
	// present. This can occur with certain conditional DDL statements (e.g.
	// "CREATE TABLE IF NOT EXISTS"), which set the SystemConfigTrigger
	// aggressively but may not actually end up changing the system DB depending
	// on the current state.
	// For more information, see the related comment at the beginning of
	// planner.makePlan().
	if hasSystemKey && !modifiedSystemConfigSpan {
		return roachpb.NewError(util.Errorf("EndTransaction hasSystemKey=%t, but hasSystemConfigTrigger=%t",
			hasSystemKey, modifiedSystemConfigSpan))
	}
	return nil
}
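Note the contrast with Example 6: the older variant required hasSystemKey and the trigger flag to match exactly, while this version only rejects system-key intents without a trigger, tolerating an aggressively set trigger for the conditional-DDL reason spelled out in the comment above.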
Example 11: runLsRanges
func runLsRanges(cmd *cobra.Command, args []string) {
	if len(args) > 1 {
		mustUsage(cmd)
		return
	}
	var startKey roachpb.Key
	{
		k := roachpb.KeyMin.Next()
		if len(args) > 0 {
			k = roachpb.Key(args[0])
		}
		rk, err := keys.Addr(k)
		if err != nil {
			panic(err)
		}
		startKey = keys.RangeMetaKey(rk)
	}
	endKey := keys.Meta2Prefix.PrefixEnd()
	kvDB, stopper := makeDBClient()
	defer stopper.Stop()
	rows, err := kvDB.Scan(startKey, endKey, maxResults)
	if err != nil {
		panicf("scan failed: %s\n", err)
	}
	for _, row := range rows {
		desc := &roachpb.RangeDescriptor{}
		if err := row.ValueProto(desc); err != nil {
			panicf("%s: unable to unmarshal range descriptor\n", row.Key)
			continue
		}
		fmt.Printf("%s-%s [%d]\n", desc.StartKey, desc.EndKey, desc.RangeID)
		for i, replica := range desc.Replicas {
			fmt.Printf("\t%d: node-id=%d store-id=%d\n",
				i, replica.NodeID, replica.StoreID)
		}
	}
	fmt.Printf("%d result(s)\n", len(rows))
}
Example 12: doLookupWithToken
func doLookupWithToken(
	t *testing.T,
	rc *rangeDescriptorCache,
	key string,
	evictToken *evictionToken,
	considerIntents bool,
	useReverseScan bool,
	wg *sync.WaitGroup,
) (*roachpb.RangeDescriptor, *evictionToken) {
	r, returnToken, pErr := rc.lookupRangeDescriptorInternal(
		context.Background(), roachpb.RKey(key), evictToken, considerIntents, useReverseScan, wg)
	if pErr != nil {
		t.Fatalf("Unexpected error from LookupRangeDescriptor: %s", pErr)
	}
	keyAddr, err := keys.Addr(roachpb.Key(key))
	if err != nil {
		t.Fatal(err)
	}
	if (useReverseScan && !r.ContainsExclusiveEndKey(keyAddr)) || (!useReverseScan && !r.ContainsKey(keyAddr)) {
		t.Fatalf("Returned range did not contain key: %s-%s, %s", r.StartKey, r.EndKey, key)
	}
	return r, returnToken
}
Example 13: TestStoreRangeSplitIdempotency
// TestStoreRangeSplit executes a split of a range and verifies that the
// resulting ranges respond to the right key ranges and that their stats
// and sequence cache have been properly accounted for.
func TestStoreRangeSplitIdempotency(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()
	rangeID := roachpb.RangeID(1)
	splitKey := roachpb.Key("m")
	content := roachpb.Key("asdvb")
	// First, write some values left and right of the proposed split key.
	pArgs := putArgs([]byte("c"), content)
	if _, err := client.SendWrapped(rg1(store), nil, &pArgs); err != nil {
		t.Fatal(err)
	}
	pArgs = putArgs([]byte("x"), content)
	if _, err := client.SendWrapped(rg1(store), nil, &pArgs); err != nil {
		t.Fatal(err)
	}
	// Increments are a good way of testing the sequence cache. Up here, we
	// address them to the original range, then later to the one that contains
	// the key.
	txn := roachpb.NewTransaction("test", []byte("c"), 10, roachpb.SERIALIZABLE,
		store.Clock().Now(), 0)
	lIncArgs := incrementArgs([]byte("apoptosis"), 100)
	if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		Txn: txn,
	}, &lIncArgs); err != nil {
		t.Fatal(err)
	}
	rIncArgs := incrementArgs([]byte("wobble"), 10)
	txn.Sequence++
	if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		Txn: txn,
	}, &rIncArgs); err != nil {
		t.Fatal(err)
	}
	// Get the original stats for key and value bytes.
	var ms engine.MVCCStats
	if err := engine.MVCCGetRangeStats(store.Engine(), rangeID, &ms); err != nil {
		t.Fatal(err)
	}
	keyBytes, valBytes := ms.KeyBytes, ms.ValBytes
	// Split the range.
	args := adminSplitArgs(roachpb.KeyMin, splitKey)
	if _, err := client.SendWrapped(rg1(store), nil, &args); err != nil {
		t.Fatal(err)
	}
	// Verify no intents remain on range descriptor keys.
	for _, key := range []roachpb.Key{keys.RangeDescriptorKey(roachpb.RKeyMin), keys.RangeDescriptorKey(keys.Addr(splitKey))} {
		if _, _, err := engine.MVCCGet(store.Engine(), key, store.Clock().Now(), true, nil); err != nil {
			t.Fatal(err)
		}
	}
	rng := store.LookupReplica(roachpb.RKeyMin, nil)
	newRng := store.LookupReplica([]byte("m"), nil)
	if !bytes.Equal(newRng.Desc().StartKey, splitKey) || !bytes.Equal(splitKey, rng.Desc().EndKey) {
		t.Errorf("ranges mismatched, wanted %q=%q=%q", newRng.Desc().StartKey, splitKey, rng.Desc().EndKey)
	}
	if !bytes.Equal(newRng.Desc().EndKey, roachpb.RKeyMax) || !bytes.Equal(rng.Desc().StartKey, roachpb.RKeyMin) {
		t.Errorf("new ranges do not cover KeyMin-KeyMax, but only %q-%q", rng.Desc().StartKey, newRng.Desc().EndKey)
	}
	// Try to get values from both left and right of where the split happened.
	gArgs := getArgs([]byte("c"))
	if reply, err := client.SendWrapped(rg1(store), nil, &gArgs); err != nil {
		t.Fatal(err)
	} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(replyBytes, content) {
		t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
	}
	gArgs = getArgs([]byte("x"))
	if reply, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: newRng.Desc().RangeID,
	}, &gArgs); err != nil {
		t.Fatal(err)
	} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(replyBytes, content) {
		t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
	}
	// Send out an increment request copied from above (same txn/sequence)
	// which remains in the old range.
	_, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		Txn: txn,
	}, &lIncArgs)
	if _, ok := err.(*roachpb.TransactionRetryError); !ok {
		t.Fatalf("unexpected sequence cache miss: %v", err)
	}
	// Send out the same increment copied from above (same txn/sequence), but
	// now to the newly created range (which should hold that key).
	//......... (remainder of this example omitted) .........
Example 14: meta
func meta(k roachpb.RKey) roachpb.RKey {
	return keys.Addr(keys.RangeMetaKey(k))
}
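This meta helper is the single-value-Addr-era counterpart of metaKey from Example 3: once keys.Addr grew an error return, callers had to absorb it explicitly, as shown there.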
Example 15: TestAcceptsUnsplitRanges
// TestAcceptsUnsplitRanges verifies that ranges that need to split are properly
// rejected when the queue has 'acceptsUnsplitRanges = false'.
func TestAcceptsUnsplitRanges(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, stopper := gossipForTest(t)
	defer stopper.Stop()
	// This range can never be split due to zone config boundaries.
	neverSplits := &Replica{}
	if err := neverSplits.setDesc(&roachpb.RangeDescriptor{
		RangeID:  1,
		StartKey: roachpb.RKeyMin,
		EndKey:   keys.Addr(keys.UserTableDataMin),
	}); err != nil {
		t.Fatal(err)
	}
	// This range will need to be split after user db/table entries are created.
	willSplit := &Replica{}
	if err := willSplit.setDesc(&roachpb.RangeDescriptor{
		RangeID:  2,
		StartKey: keys.Addr(keys.UserTableDataMin),
		EndKey:   roachpb.RKeyMax,
	}); err != nil {
		t.Fatal(err)
	}
	var queued int32
	testQueue := &testQueueImpl{
		shouldQueueFn: func(now roachpb.Timestamp, r *Replica) (shouldQueue bool, priority float64) {
			// Always queue ranges if they make it past the base queue's logic.
			atomic.AddInt32(&queued, 1)
			return true, float64(r.Desc().RangeID)
		},
		acceptUnsplit: false,
	}
	bq := makeBaseQueue("test", testQueue, g, 2)
	mc := hlc.NewManualClock(0)
	clock := hlc.NewClock(mc.UnixNano)
	bq.Start(clock, stopper)
	// Check our config.
	sysCfg := g.GetSystemConfig()
	if sysCfg == nil {
		t.Fatal("nil config")
	}
	if sysCfg.NeedsSplit(neverSplits.Desc().StartKey, neverSplits.Desc().EndKey) {
		t.Fatal("System config says range needs to be split")
	}
	if sysCfg.NeedsSplit(willSplit.Desc().StartKey, willSplit.Desc().EndKey) {
		t.Fatal("System config says range needs to be split")
	}
	// There are no user db/table entries, everything should be added and
	// processed as usual.
	bq.MaybeAdd(neverSplits, roachpb.ZeroTimestamp)
	bq.MaybeAdd(willSplit, roachpb.ZeroTimestamp)
	if err := util.IsTrueWithin(func() bool {
		return atomic.LoadInt32(&testQueue.processed) == 2
	}, 250*time.Millisecond); err != nil {
		t.Error(err)
	}
	if pc := atomic.LoadInt32(&queued); pc != 2 {
		t.Errorf("expected queued count of 2; got %d", pc)
	}
	// Now add a user object, it will trigger a split.
	// The range willSplit starts at the beginning of the user data range,
	// which means keys.MaxReservedDescID+1.
	config.TestingSetZoneConfig(keys.MaxReservedDescID+2, &config.ZoneConfig{RangeMaxBytes: 1 << 20})
	// Check our config.
	if sysCfg.NeedsSplit(neverSplits.Desc().StartKey, neverSplits.Desc().EndKey) {
		t.Fatal("System config says range needs to be split")
	}
	if !sysCfg.NeedsSplit(willSplit.Desc().StartKey, willSplit.Desc().EndKey) {
		t.Fatal("System config says range does not need to be split")
	}
	bq.MaybeAdd(neverSplits, roachpb.ZeroTimestamp)
	bq.MaybeAdd(willSplit, roachpb.ZeroTimestamp)
	if err := util.IsTrueWithin(func() bool {
		return atomic.LoadInt32(&testQueue.processed) == 3
	}, 250*time.Millisecond); err != nil {
		t.Error(err)
	}
	if pc := atomic.LoadInt32(&queued); pc != 3 {
		t.Errorf("expected queued count of 3; got %d", pc)
	}
}