This article collects typical usage examples of the Golang function github.com/cockroachdb/cockroach/pkg/internal/client.SendWrappedWith. If you have been wondering what exactly SendWrappedWith does, how to call it, or what real-world uses of it look like, the curated code examples below may help.
A total of 15 code examples of the SendWrappedWith function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
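Before the examples, here is a minimal sketch of the call pattern they all share: build a roachpb.Request, fill a roachpb.Header with per-request options (transaction, timestamp, read consistency, range ID), and pass both to SendWrappedWith together with a Sender. The helper name exampleGet and the key/options chosen are illustrative only; the import paths and the db.GetSender() and pErr.GoError() calls mirror the examples below and assume the same version of the repository.

package example

import (
	"golang.org/x/net/context"

	"github.com/cockroachdb/cockroach/pkg/internal/client"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

// exampleGet sends a single GetRequest through SendWrappedWith, attaching an
// INCONSISTENT read-consistency option via the header. The function name and
// the chosen options are illustrative, not part of the library.
func exampleGet(ctx context.Context, db *client.DB, key roachpb.Key) (*roachpb.GetResponse, error) {
	getReq := &roachpb.GetRequest{
		Span: roachpb.Span{Key: key},
	}
	// The header carries the per-request options; the request itself only
	// describes the operation and the key span it touches.
	reply, pErr := client.SendWrappedWith(ctx, db.GetSender(), roachpb.Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}, getReq)
	if pErr != nil {
		// SendWrappedWith returns a *roachpb.Error; convert it to a plain error.
		return nil, pErr.GoError()
	}
	return reply.(*roachpb.GetResponse), nil
}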
Example 1: TestTxnMultipleCoord
// TestTxnMultipleCoord checks that a coordinator uses the Writing flag to
// enforce that only one coordinator can be used for transactional writes.
func TestTxnMultipleCoord(t *testing.T) {
defer leaktest.AfterTest(t)()
s, sender := createTestDB(t)
defer s.Stop()
testCases := []struct {
args roachpb.Request
writing bool
ok bool
}{
{roachpb.NewGet(roachpb.Key("a")), true, false},
{roachpb.NewGet(roachpb.Key("a")), false, true},
{roachpb.NewPut(roachpb.Key("a"), roachpb.Value{}), false, false}, // transactional write before begin
{roachpb.NewPut(roachpb.Key("a"), roachpb.Value{}), true, false}, // must have switched coordinators
}
for i, tc := range testCases {
txn := roachpb.NewTransaction("test", roachpb.Key("a"), 1, enginepb.SERIALIZABLE,
s.Clock.Now(), s.Clock.MaxOffset().Nanoseconds())
txn.Writing = tc.writing
reply, pErr := client.SendWrappedWith(context.Background(), sender, roachpb.Header{
Txn: txn,
}, tc.args)
if (pErr == nil) != tc.ok {
t.Errorf("%d: %T (writing=%t): success_expected=%t, but got: %v",
i, tc.args, tc.writing, tc.ok, pErr)
}
if pErr != nil {
continue
}
txn = reply.Header().Txn
// The transaction should come back rw if it started rw or if we just
// wrote.
isWrite := roachpb.IsTransactionWrite(tc.args)
if (tc.writing || isWrite) != txn.Writing {
t.Errorf("%d: unexpected writing state: %s", i, txn)
}
if !isWrite {
continue
}
// Abort for clean shutdown.
if _, pErr := client.SendWrappedWith(context.Background(), sender, roachpb.Header{
Txn: txn,
}, &roachpb.EndTransactionRequest{
Commit: false,
}); pErr != nil {
t.Fatal(pErr)
}
}
}
Example 2: TestRejectFutureCommand
// TestRejectFutureCommand verifies that lease holders reject commands that
// would cause a large time jump.
func TestRejectFutureCommand(t *testing.T) {
defer leaktest.AfterTest(t)()
manual := hlc.NewManualClock(123)
clock := hlc.NewClock(manual.UnixNano, 100*time.Millisecond)
mtc := &multiTestContext{clock: clock}
mtc.Start(t, 1)
defer mtc.Stop()
ts1 := clock.Now()
key := roachpb.Key("a")
incArgs := incrementArgs(key, 5)
// Commands with a future timestamp that is within the MaxOffset
// bound will be accepted and will cause the clock to advance.
const numCmds = 3
clockOffset := clock.MaxOffset() / numCmds
for i := int64(1); i <= numCmds; i++ {
ts := ts1.Add(i*clockOffset.Nanoseconds(), 0)
if _, err := client.SendWrappedWith(context.Background(), rg1(mtc.stores[0]), roachpb.Header{Timestamp: ts}, &incArgs); err != nil {
t.Fatal(err)
}
}
ts2 := clock.Now()
if advance, expAdvance := ts2.GoTime().Sub(ts1.GoTime()), numCmds*clockOffset; advance != expAdvance {
t.Fatalf("expected clock to advance %s; got %s", expAdvance, advance)
}
// Once the accumulated offset reaches MaxOffset, commands will be rejected.
_, pErr := client.SendWrappedWith(context.Background(), rg1(mtc.stores[0]), roachpb.Header{Timestamp: ts1.Add(clock.MaxOffset().Nanoseconds()+1, 0)}, &incArgs)
if !testutils.IsPError(pErr, "rejecting command with timestamp in the future") {
t.Fatalf("unexpected error %v", pErr)
}
// The clock did not advance and the final command was not executed.
ts3 := clock.Now()
if advance := ts3.GoTime().Sub(ts2.GoTime()); advance != 0 {
t.Fatalf("expected clock not to advance, but it advanced by %s", advance)
}
val, _, err := engine.MVCCGet(context.Background(), mtc.engines[0], key, ts3, true, nil)
if err != nil {
t.Fatal(err)
}
if a, e := mustGetInt(val), incArgs.Increment*numCmds; a != e {
t.Errorf("expected %d, got %d", e, a)
}
}
Example 3: process
// process synchronously invokes admin split for each proposed split key.
func (sq *splitQueue) process(
ctx context.Context, now hlc.Timestamp, r *Replica, sysCfg config.SystemConfig,
) error {
// First handle case of splitting due to zone config maps.
desc := r.Desc()
splitKeys := sysCfg.ComputeSplitKeys(desc.StartKey, desc.EndKey)
if len(splitKeys) > 0 {
log.Infof(ctx, "splitting at keys %v", splitKeys)
for _, splitKey := range splitKeys {
if err := sq.db.AdminSplit(ctx, splitKey.AsRawKey()); err != nil {
return errors.Errorf("unable to split %s at key %q: %s", r, splitKey, err)
}
}
return nil
}
// Next handle case of splitting due to size.
zone, err := sysCfg.GetZoneConfigForKey(desc.StartKey)
if err != nil {
return err
}
size := r.GetMVCCStats().Total()
// FIXME: why is this implementation not the same as the one above?
if float64(size)/float64(zone.RangeMaxBytes) > 1 {
log.Infof(ctx, "splitting size=%d max=%d", size, zone.RangeMaxBytes)
if _, pErr := client.SendWrappedWith(ctx, r, roachpb.Header{
Timestamp: now,
}, &roachpb.AdminSplitRequest{
Span: roachpb.Span{Key: desc.StartKey.AsRawKey()},
}); pErr != nil {
return pErr.GoError()
}
}
return nil
}
Example 4: TestComputeStatsForKeySpan
func TestComputeStatsForKeySpan(t *testing.T) {
defer leaktest.AfterTest(t)()
mtc := &multiTestContext{}
defer mtc.Stop()
mtc.Start(t, 3)
// Create a number of ranges using splits.
splitKeys := []string{"a", "c", "e", "g", "i"}
for _, k := range splitKeys {
key := []byte(k)
repl := mtc.stores[0].LookupReplica(key, roachpb.RKeyMin)
args := adminSplitArgs(key, key)
header := roachpb.Header{
RangeID: repl.RangeID,
}
if _, err := client.SendWrappedWith(context.Background(), mtc.stores[0], header, args); err != nil {
t.Fatal(err)
}
}
// Wait for splits to finish.
testutils.SucceedsSoon(t, func() error {
repl := mtc.stores[0].LookupReplica(roachpb.RKey("z"), nil)
if actualRSpan := repl.Desc().RSpan(); !actualRSpan.Key.Equal(roachpb.RKey("i")) {
return errors.Errorf("expected range %s to begin at key 'i'", repl)
}
return nil
})
// Create some keys across the ranges.
incKeys := []string{"b", "bb", "bbb", "d", "dd", "h"}
for _, k := range incKeys {
if _, err := mtc.dbs[0].Inc(context.TODO(), []byte(k), 5); err != nil {
t.Fatal(err)
}
}
// Verify stats across different spans.
for _, tcase := range []struct {
startKey string
endKey string
expectedRanges int
expectedKeys int64
}{
{"a", "i", 4, 6},
{"a", "c", 1, 3},
{"b", "e", 2, 5},
{"e", "i", 2, 1},
} {
start, end := tcase.startKey, tcase.endKey
stats, count := mtc.stores[0].ComputeStatsForKeySpan(
roachpb.RKey(start), roachpb.RKey(end))
if a, e := count, tcase.expectedRanges; a != e {
t.Errorf("Expected %d ranges in span [%s - %s], found %d", e, start, end, a)
}
if a, e := stats.LiveCount, tcase.expectedKeys; a != e {
t.Errorf("Expected %d keys in span [%s - %s], found %d", e, start, end, a)
}
}
}
Example 5: TestRejectFutureCommand
// TestRejectFutureCommand verifies that lease holders reject commands that
// would cause a large time jump.
func TestRejectFutureCommand(t *testing.T) {
defer leaktest.AfterTest(t)()
const maxOffset = 100 * time.Millisecond
manual := hlc.NewManualClock(0)
clock := hlc.NewClock(manual.UnixNano)
clock.SetMaxOffset(maxOffset)
mtc := &multiTestContext{clock: clock}
mtc.Start(t, 1)
defer mtc.Stop()
startTime := manual.UnixNano()
// Commands with a future timestamp that is within the MaxOffset
// bound will be accepted and will cause the clock to advance.
for i := int64(0); i < 3; i++ {
incArgs := incrementArgs([]byte("a"), 5)
ts := hlc.ZeroTimestamp.Add(startTime+((i+1)*30)*int64(time.Millisecond), 0)
if _, err := client.SendWrappedWith(context.Background(), rg1(mtc.stores[0]), roachpb.Header{Timestamp: ts}, &incArgs); err != nil {
t.Fatal(err)
}
}
if now := clock.Now(); now.WallTime != int64(90*time.Millisecond) {
t.Fatalf("expected clock to advance to 90ms; got %s", now)
}
// Once the accumulated offset reaches MaxOffset, commands will be rejected.
incArgs := incrementArgs([]byte("a"), 11)
ts := hlc.ZeroTimestamp.Add(int64((time.Duration(startTime)+maxOffset+1)*time.Millisecond), 0)
if _, err := client.SendWrappedWith(context.Background(), rg1(mtc.stores[0]), roachpb.Header{Timestamp: ts}, &incArgs); err == nil {
t.Fatalf("expected clock offset error but got nil")
}
// The clock remained at 90ms and the final command was not executed.
if now := clock.Now(); now.WallTime != int64(90*time.Millisecond) {
t.Errorf("expected clock to stay at 90ms; got %s", now)
}
val, _, err := engine.MVCCGet(context.Background(), mtc.engines[0], roachpb.Key("a"), clock.Now(), true, nil)
if err != nil {
t.Fatal(err)
}
if v := mustGetInt(val); v != 15 {
t.Errorf("expected 15, got %v", v)
}
}
Example 6: TestRangeCommandClockUpdate
// TestRangeCommandClockUpdate verifies that followers update their
// clocks when executing a command, even if the lease holder's clock is far
// in the future.
func TestRangeCommandClockUpdate(t *testing.T) {
defer leaktest.AfterTest(t)()
const numNodes = 3
var manuals []*hlc.ManualClock
var clocks []*hlc.Clock
for i := 0; i < numNodes; i++ {
manuals = append(manuals, hlc.NewManualClock(1))
clocks = append(clocks, hlc.NewClock(manuals[i].UnixNano))
clocks[i].SetMaxOffset(100 * time.Millisecond)
}
mtc := &multiTestContext{clocks: clocks}
mtc.Start(t, numNodes)
defer mtc.Stop()
mtc.replicateRange(1, 1, 2)
// Advance the lease holder's clock ahead of the followers (by more than
// MaxOffset but less than the range lease) and execute a command.
manuals[0].Increment(int64(500 * time.Millisecond))
incArgs := incrementArgs([]byte("a"), 5)
ts := clocks[0].Now()
if _, err := client.SendWrappedWith(context.Background(), rg1(mtc.stores[0]), roachpb.Header{Timestamp: ts}, &incArgs); err != nil {
t.Fatal(err)
}
// Wait for that command to execute on all the followers.
util.SucceedsSoon(t, func() error {
values := []int64{}
for _, eng := range mtc.engines {
val, _, err := engine.MVCCGet(context.Background(), eng, roachpb.Key("a"), clocks[0].Now(), true, nil)
if err != nil {
return err
}
values = append(values, mustGetInt(val))
}
if !reflect.DeepEqual(values, []int64{5, 5, 5}) {
return errors.Errorf("expected (5, 5, 5), got %v", values)
}
return nil
})
// Verify that all the followers have accepted the clock update from
// node 0 even though it comes from outside the usual max offset.
now := clocks[0].Now()
for i, clock := range clocks {
// Only compare the WallTimes: it's normal for clock 0 to be a few logical ticks ahead.
if clock.Now().WallTime < now.WallTime {
t.Errorf("clock %d is behind clock 0: %s vs %s", i, clock.Now(), now)
}
}
}
Example 7: getTxn
// getTxn fetches the requested key and returns the transaction info.
func getTxn(coord *TxnCoordSender, txn *roachpb.Transaction) (*roachpb.Transaction, *roachpb.Error) {
hb := &roachpb.HeartbeatTxnRequest{
Span: roachpb.Span{
Key: txn.Key,
},
}
reply, pErr := client.SendWrappedWith(context.Background(), coord, roachpb.Header{
Txn: txn,
}, hb)
if pErr != nil {
return nil, pErr
}
return reply.(*roachpb.HeartbeatTxnResponse).Txn, nil
}
Example 8: FindRangeLease
// FindRangeLease is similar to FindRangeLeaseHolder but returns a Lease proto
// without verifying if the lease is still active. Instead, it returns a
// timestamp taken off the queried node's clock.
func (tc *TestCluster) FindRangeLease(
rangeDesc *roachpb.RangeDescriptor, hint *ReplicationTarget,
) (_ *roachpb.Lease, now hlc.Timestamp, _ error) {
if hint != nil {
var ok bool
if _, ok = rangeDesc.GetReplicaDescriptor(hint.StoreID); !ok {
return nil, hlc.ZeroTimestamp, errors.Errorf(
"bad hint: %+v; store doesn't have a replica of the range", hint)
}
} else {
hint = &ReplicationTarget{
NodeID: rangeDesc.Replicas[0].NodeID,
StoreID: rangeDesc.Replicas[0].StoreID}
}
// Find the server indicated by the hint and send a LeaseInfoRequest through
// it.
var hintServer *server.TestServer
for _, s := range tc.Servers {
if s.GetNode().Descriptor.NodeID == hint.NodeID {
hintServer = s
break
}
}
if hintServer == nil {
return nil, hlc.ZeroTimestamp, errors.Errorf("bad hint: %+v; no such node", hint)
}
leaseReq := roachpb.LeaseInfoRequest{
Span: roachpb.Span{
Key: rangeDesc.StartKey.AsRawKey(),
},
}
leaseResp, pErr := client.SendWrappedWith(
context.TODO(),
hintServer.DB().GetSender(),
roachpb.Header{
// INCONSISTENT read, since we want to make sure that the node used to
// send this is the one that processes the command, for the hint to
// matter.
ReadConsistency: roachpb.INCONSISTENT,
},
&leaseReq)
if pErr != nil {
return nil, hlc.ZeroTimestamp, pErr.GoError()
}
return leaseResp.(*roachpb.LeaseInfoResponse).Lease, hintServer.Clock().Now(), nil
}
Example 9: fillTestRange
func fillTestRange(t testing.TB, rep *Replica, size int64) {
src := rand.New(rand.NewSource(0))
for i := int64(0); i < size/int64(keySize+valSize); i++ {
key := keys.MakeRowSentinelKey(randutil.RandBytes(src, keySize))
val := randutil.RandBytes(src, valSize)
pArgs := putArgs(key, val)
if _, pErr := client.SendWrappedWith(context.Background(), rep, roachpb.Header{
RangeID: rangeID,
}, &pArgs); pErr != nil {
t.Fatal(pErr)
}
}
rep.mu.Lock()
after := rep.mu.state.Stats.Total()
rep.mu.Unlock()
if after < size {
t.Fatalf("range not full after filling: wrote %d, but range at %d", size, after)
}
}
Example 10: LeaseInfo
// LeaseInfo runs a LeaseInfoRequest using the specified server.
func LeaseInfo(
t *testing.T,
db *client.DB,
rangeDesc roachpb.RangeDescriptor,
readConsistency roachpb.ReadConsistencyType,
) roachpb.LeaseInfoResponse {
leaseInfoReq := &roachpb.LeaseInfoRequest{
Span: roachpb.Span{
Key: rangeDesc.StartKey.AsRawKey(),
},
}
reply, pErr := client.SendWrappedWith(context.Background(), db.GetSender(), roachpb.Header{
ReadConsistency: readConsistency,
}, leaseInfoReq)
if pErr != nil {
t.Fatal(pErr)
}
return *(reply.(*roachpb.LeaseInfoResponse))
}
Example 11: TestErrorHandlingForNonKVCommand
// Test that an error encountered by a read-only "NonKV" command is not
// swallowed, and doesn't otherwise cause a panic.
// We had a bug caused by the fact that errors for these commands weren't passed
// through the epilogue returned by replica.beginCommands() and were getting
// swallowed.
func TestErrorHandlingForNonKVCommand(t *testing.T) {
defer leaktest.AfterTest(t)()
cmdFilter := func(fArgs storagebase.FilterArgs) *roachpb.Error {
if fArgs.Hdr.UserPriority == 42 {
return roachpb.NewErrorf("injected error")
}
return nil
}
srv, _, _ := serverutils.StartServer(t,
base.TestServerArgs{
Knobs: base.TestingKnobs{
Store: &storage.StoreTestingKnobs{
TestingCommandFilter: cmdFilter,
},
},
})
s := srv.(*server.TestServer)
defer s.Stopper().Stop()
// Send the lease request.
key := roachpb.Key("a")
leaseReq := roachpb.LeaseInfoRequest{
Span: roachpb.Span{
Key: key,
},
}
_, pErr := client.SendWrappedWith(
context.Background(),
s.DistSender(),
roachpb.Header{UserPriority: 42},
&leaseReq,
)
if !testutils.IsPError(pErr, "injected error") {
t.Fatalf("expected error %q, got: %s", "injected error", pErr)
}
}
Example 12: TestLeaseExtensionNotBlockedByRead
// Test that a lease extension (a RequestLeaseRequest that doesn't change the
// lease holder) is not blocked by ongoing reads.
// The test relies on two things:
// 1) Lease extensions, unlike lease transfers, are not blocked by reads through their
// PostCommitTrigger.noConcurrentReads.
// 2) Requests with the non-KV flag, such as RequestLeaseRequest, do not
// go through the command queue.
func TestLeaseExtensionNotBlockedByRead(t *testing.T) {
defer leaktest.AfterTest(t)()
readBlocked := make(chan struct{})
cmdFilter := func(fArgs storagebase.FilterArgs) *roachpb.Error {
if fArgs.Hdr.UserPriority == 42 {
// Signal that the read is blocked.
readBlocked <- struct{}{}
// Wait for read to be unblocked.
<-readBlocked
}
return nil
}
srv, _, _ := serverutils.StartServer(t,
base.TestServerArgs{
Knobs: base.TestingKnobs{
Store: &storage.StoreTestingKnobs{
TestingCommandFilter: cmdFilter,
},
},
})
s := srv.(*server.TestServer)
defer s.Stopper().Stop()
// Start a read and wait for it to block.
key := roachpb.Key("a")
errChan := make(chan error)
go func() {
getReq := roachpb.GetRequest{
Span: roachpb.Span{
Key: key,
},
}
if _, pErr := client.SendWrappedWith(context.Background(), s.DistSender(),
roachpb.Header{UserPriority: 42},
&getReq); pErr != nil {
errChan <- pErr.GoError()
}
}()
select {
case err := <-errChan:
t.Fatal(err)
case <-readBlocked:
// Send the lease request.
rKey, err := keys.Addr(key)
if err != nil {
t.Fatal(err)
}
_, repDesc, err := s.Stores().LookupReplica(rKey, nil)
if err != nil {
t.Fatal(err)
}
leaseReq := roachpb.RequestLeaseRequest{
Span: roachpb.Span{
Key: key,
},
Lease: roachpb.Lease{
Start: s.Clock().Now(),
StartStasis: s.Clock().Now().Add(time.Second.Nanoseconds(), 0),
Expiration: s.Clock().Now().Add(2*time.Second.Nanoseconds(), 0),
Replica: repDesc,
},
}
if _, pErr := client.SendWrapped(context.Background(), s.DistSender(), &leaseReq); pErr != nil {
t.Fatal(pErr)
}
// Unblock the read.
readBlocked <- struct{}{}
}
}
Example 13: TestTimeSeriesMaintenanceQueue
// TestTimeSeriesMaintenanceQueue verifies that the shouldQueue and process
// methods pass the correct data to the store's TimeSeriesDataStore.
func TestTimeSeriesMaintenanceQueue(t *testing.T) {
defer leaktest.AfterTest(t)()
model := &modelTimeSeriesDataStore{
t: t,
pruneSeenStartKeys: make(map[string]struct{}),
pruneSeenEndKeys: make(map[string]struct{}),
}
manual := hlc.NewManualClock(1)
cfg := storage.TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond))
cfg.TimeSeriesDataStore = model
cfg.TestingKnobs.DisableScanner = true
cfg.TestingKnobs.DisableSplitQueue = true
stopper := stop.NewStopper()
defer stopper.Stop()
store := createTestStoreWithConfig(t, stopper, cfg)
// Generate several splits.
splitKeys := []roachpb.Key{roachpb.Key("c"), roachpb.Key("b"), roachpb.Key("a")}
for _, k := range splitKeys {
repl := store.LookupReplica(roachpb.RKey(k), nil)
args := adminSplitArgs(k, k)
if _, pErr := client.SendWrappedWith(context.Background(), store, roachpb.Header{
RangeID: repl.RangeID,
}, args); pErr != nil {
t.Fatal(pErr)
}
}
// Generate a list of start/end keys the model should have been passed by
// the queue. This consists of all split keys, with KeyMin as an additional
// start and KeyMax as an additional end.
expectedStartKeys := make(map[string]struct{})
expectedEndKeys := make(map[string]struct{})
expectedStartKeys[roachpb.KeyMin.String()] = struct{}{}
expectedEndKeys[roachpb.KeyMax.String()] = struct{}{}
for _, expected := range splitKeys {
expectedStartKeys[expected.String()] = struct{}{}
expectedEndKeys[expected.String()] = struct{}{}
}
// Wait for splits to complete and system config to be available.
util.SucceedsSoon(t, func() error {
if a, e := store.ReplicaCount(), len(expectedEndKeys); a != e {
return fmt.Errorf("expected %d replicas in store; found %d", a, e)
}
if _, ok := store.Gossip().GetSystemConfig(); !ok {
return fmt.Errorf("system config not yet available")
}
return nil
})
// Force replica scan to run, which will populate the model.
now := store.Clock().Now()
store.ForceTimeSeriesMaintenanceQueueProcess()
// Wait for processing to complete.
util.SucceedsSoon(t, func() error {
model.Lock()
defer model.Unlock()
if a, e := model.containsCalled, len(expectedStartKeys); a != e {
return fmt.Errorf("ContainsTimeSeries called %d times; expected %d", a, e)
}
if a, e := model.pruneCalled, len(expectedStartKeys); a != e {
return fmt.Errorf("PruneTimeSeries called %d times; expected %d", a, e)
}
return nil
})
model.Lock()
if a, e := model.pruneSeenStartKeys, expectedStartKeys; !reflect.DeepEqual(a, e) {
t.Errorf("start keys seen by PruneTimeSeries did not match expectation: %s", pretty.Diff(a, e))
}
if a, e := model.pruneSeenEndKeys, expectedEndKeys; !reflect.DeepEqual(a, e) {
t.Errorf("end keys seen by PruneTimeSeries did not match expectation: %s", pretty.Diff(a, e))
}
model.Unlock()
util.SucceedsSoon(t, func() error {
keys := []roachpb.RKey{roachpb.RKeyMin}
for _, k := range splitKeys {
keys = append(keys, roachpb.RKey(k))
}
for _, key := range keys {
repl := store.LookupReplica(key, nil)
ts, err := repl.GetQueueLastProcessed(context.TODO(), "timeSeriesMaintenance")
if err != nil {
return err
}
if ts.Less(now) {
return errors.Errorf("expected last processed %s > %s", ts, now)
}
}
return nil
})
//......... (remaining code omitted) .........
Example 14: TestStoreRangeMergeMetadataCleanup
// TestStoreRangeMergeMetadataCleanup tests that all metadata of a
// subsumed range is cleaned up on merge.
func TestStoreRangeMergeMetadataCleanup(t *testing.T) {
defer leaktest.AfterTest(t)()
storeCfg := storage.TestStoreConfig(nil)
storeCfg.TestingKnobs.DisableSplitQueue = true
store, stopper := createTestStoreWithConfig(t, storeCfg)
defer stopper.Stop()
scan := func(f func(roachpb.KeyValue) (bool, error)) {
if _, err := engine.MVCCIterate(context.Background(), store.Engine(), roachpb.KeyMin, roachpb.KeyMax, hlc.ZeroTimestamp, true, nil, false, f); err != nil {
t.Fatal(err)
}
}
content := roachpb.Key("testing!")
// Write some values left of the proposed split key.
pArgs := putArgs([]byte("aaa"), content)
if _, err := client.SendWrapped(context.Background(), rg1(store), &pArgs); err != nil {
t.Fatal(err)
}
// Collect all the keys.
preKeys := make(map[string]struct{})
scan(func(kv roachpb.KeyValue) (bool, error) {
preKeys[string(kv.Key)] = struct{}{}
return false, nil
})
// Split the range.
_, bDesc, err := createSplitRanges(store)
if err != nil {
t.Fatal(err)
}
// Write some values right of the split key.
pArgs = putArgs([]byte("ccc"), content)
if _, err := client.SendWrappedWith(context.Background(), rg1(store), roachpb.Header{
RangeID: bDesc.RangeID,
}, &pArgs); err != nil {
t.Fatal(err)
}
// Merge the b range back into the a range.
args := adminMergeArgs(roachpb.KeyMin)
if _, err := client.SendWrapped(context.Background(), rg1(store), &args); err != nil {
t.Fatal(err)
}
// Collect all the keys again.
postKeys := make(map[string]struct{})
scan(func(kv roachpb.KeyValue) (bool, error) {
postKeys[string(kv.Key)] = struct{}{}
return false, nil
})
// Compute the new keys.
for k := range preKeys {
delete(postKeys, k)
}
// Keep only the subsumed range's local keys.
localRangeKeyPrefix := string(keys.MakeRangeIDPrefix(bDesc.RangeID))
for k := range postKeys {
if !strings.HasPrefix(k, localRangeKeyPrefix) {
delete(postKeys, k)
}
}
if numKeys := len(postKeys); numKeys > 0 {
var buf bytes.Buffer
fmt.Fprintf(&buf, "%d keys were not cleaned up:\n", numKeys)
for k := range postKeys {
fmt.Fprintf(&buf, "%q\n", k)
}
t.Fatal(buf.String())
}
}
Example 15: TestStoreRangeMergeWithData
// TestStoreRangeMergeWithData attempts to merge two collocated ranges,
// each containing data.
func TestStoreRangeMergeWithData(t *testing.T) {
defer leaktest.AfterTest(t)()
storeCfg := storage.TestStoreConfig(nil)
storeCfg.TestingKnobs.DisableSplitQueue = true
store, stopper := createTestStoreWithConfig(t, storeCfg)
defer stopper.Stop()
content := roachpb.Key("testing!")
aDesc, bDesc, err := createSplitRanges(store)
if err != nil {
t.Fatal(err)
}
// Write some values left and right of the proposed split key.
pArgs := putArgs([]byte("aaa"), content)
if _, err := client.SendWrapped(context.Background(), rg1(store), &pArgs); err != nil {
t.Fatal(err)
}
pArgs = putArgs([]byte("ccc"), content)
if _, err := client.SendWrappedWith(context.Background(), rg1(store), roachpb.Header{
RangeID: bDesc.RangeID,
}, &pArgs); err != nil {
t.Fatal(err)
}
// Confirm the values are there.
gArgs := getArgs([]byte("aaa"))
if reply, err := client.SendWrapped(context.Background(), rg1(store), &gArgs); err != nil {
t.Fatal(err)
} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(replyBytes, content) {
t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
}
gArgs = getArgs([]byte("ccc"))
if reply, err := client.SendWrappedWith(context.Background(), rg1(store), roachpb.Header{
RangeID: bDesc.RangeID,
}, &gArgs); err != nil {
t.Fatal(err)
} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(replyBytes, content) {
t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
}
// Merge the b range back into the a range.
args := adminMergeArgs(roachpb.KeyMin)
if _, err := client.SendWrapped(context.Background(), rg1(store), &args); err != nil {
t.Fatal(err)
}
// Verify that no intents remain on the range descriptor keys.
for _, key := range []roachpb.Key{keys.RangeDescriptorKey(aDesc.StartKey), keys.RangeDescriptorKey(bDesc.StartKey)} {
if _, _, err := engine.MVCCGet(context.Background(), store.Engine(), key, store.Clock().Now(), true, nil); err != nil {
t.Fatal(err)
}
}
// Verify the merge by looking up keys from both ranges.
rangeA := store.LookupReplica([]byte("a"), nil)
rangeB := store.LookupReplica([]byte("c"), nil)
rangeADesc := rangeA.Desc()
rangeBDesc := rangeB.Desc()
if !reflect.DeepEqual(rangeA, rangeB) {
t.Fatalf("ranges were not merged %+v=%+v", rangeADesc, rangeBDesc)
}
if !bytes.Equal(rangeADesc.StartKey, roachpb.RKeyMin) {
t.Fatalf("The start key is not equal to KeyMin %q=%q", rangeADesc.StartKey, roachpb.RKeyMin)
}
if !bytes.Equal(rangeADesc.EndKey, roachpb.RKeyMax) {
t.Fatalf("The end key is not equal to KeyMax %q=%q", rangeADesc.EndKey, roachpb.RKeyMax)
}
// Try to get values from after the merge.
gArgs = getArgs([]byte("aaa"))
if reply, err := client.SendWrapped(context.Background(), rg1(store), &gArgs); err != nil {
t.Fatal(err)
} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(replyBytes, content) {
t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
}
gArgs = getArgs([]byte("ccc"))
if reply, err := client.SendWrappedWith(context.Background(), rg1(store), roachpb.Header{
RangeID: rangeB.RangeID,
}, &gArgs); err != nil {
t.Fatal(err)
} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(replyBytes, content) {
t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
}
// Put new values after the merge on both sides.
pArgs = putArgs([]byte("aaaa"), content)
if _, err := client.SendWrapped(context.Background(), rg1(store), &pArgs); err != nil {
//......... (remaining code omitted) .........