This page collects typical usage examples of the Golang function github.com/cockroachdb/cockroach/internal/client.SendWrapped. If you are wondering what the SendWrapped function does, how to call it, or what working code that uses it looks like, the curated examples below may help.
The following presents 15 code examples of the SendWrapped function, sorted by popularity by default.
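Before the examples, a note on the call pattern they all share: SendWrapped sends a single roachpb.Request through any client.Sender (a DistSender, a TxnCoordSender, or a store's test sender), wraps it into a batch, and unwraps the reply, returning (roachpb.Response, *roachpb.Error). The sketch below is illustrative only, assuming the API of the era these examples target, in which each request embeds a roachpb.Span; the increment helper and its key are hypothetical, not taken from the examples:

// increment sends one IncrementRequest through the given sender and
// returns the new value. A sketch only; the sender must be constructed
// elsewhere, e.g. a running test cluster's DistSender.
func increment(sender client.Sender, key roachpb.Key, delta int64) (int64, *roachpb.Error) {
    args := roachpb.IncrementRequest{
        Span:      roachpb.Span{Key: key},
        Increment: delta,
    }
    reply, pErr := client.SendWrapped(sender, nil, &args)
    if pErr != nil {
        return 0, pErr
    }
    return reply.(*roachpb.IncrementResponse).NewValue, nil
}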
Example 1: TestLeaderAfterSplit
// TestLeaderAfterSplit verifies that a raft group created by a split
// elects a leader without waiting for an election timeout.
func TestLeaderAfterSplit(t *testing.T) {
    defer leaktest.AfterTest(t)()
    storeContext := storage.TestStoreContext()
    storeContext.RaftElectionTimeoutTicks = 1000000
    mtc := &multiTestContext{
        storeContext: &storeContext,
    }
    mtc.Start(t, 3)
    defer mtc.Stop()
    mtc.replicateRange(1, 1, 2)
    leftKey := roachpb.Key("a")
    splitKey := roachpb.Key("m")
    rightKey := roachpb.Key("z")
    splitArgs := adminSplitArgs(roachpb.KeyMin, splitKey)
    if _, pErr := client.SendWrapped(mtc.distSenders[0], nil, &splitArgs); pErr != nil {
        t.Fatal(pErr)
    }
    incArgs := incrementArgs(leftKey, 1)
    if _, pErr := client.SendWrapped(mtc.distSenders[0], nil, &incArgs); pErr != nil {
        t.Fatal(pErr)
    }
    incArgs = incrementArgs(rightKey, 2)
    if _, pErr := client.SendWrapped(mtc.distSenders[0], nil, &incArgs); pErr != nil {
        t.Fatal(pErr)
    }
}
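Example 1 (and most of the examples below) relies on small request-constructor helpers from the storage test package whose definitions are not shown on this page. A hedged reconstruction, consistent with how they are called here but not guaranteed to match the originals line for line:

func adminSplitArgs(key, splitKey roachpb.Key) roachpb.AdminSplitRequest {
    return roachpb.AdminSplitRequest{
        Span:     roachpb.Span{Key: key}, // addresses the range containing key
        SplitKey: splitKey,               // where that range is split in two
    }
}

func adminMergeArgs(key roachpb.Key) roachpb.AdminMergeRequest {
    // Merges the range containing key with its right-hand neighbor.
    return roachpb.AdminMergeRequest{Span: roachpb.Span{Key: key}}
}

func incrementArgs(key roachpb.Key, inc int64) roachpb.IncrementRequest {
    return roachpb.IncrementRequest{Span: roachpb.Span{Key: key}, Increment: inc}
}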
Example 2: TestSplitSnapshotRace_SplitWins
// TestSplitSnapshotRace_SplitWins exercises one outcome of the
// split/snapshot race: The left side of the split propagates first,
// so the split completes before it sees a competing snapshot. This is
// the more common outcome in practice.
func TestSplitSnapshotRace_SplitWins(t *testing.T) {
    defer leaktest.AfterTest(t)()
    mtc, leftKey, rightKey := setupSplitSnapshotRace(t)
    defer mtc.Stop()
    // Bring the left range up first so that the split happens before it sees a snapshot.
    for i := 1; i <= 3; i++ {
        mtc.restartStore(i)
    }
    // Perform a write on the left range and wait for it to propagate.
    incArgs := incrementArgs(leftKey, 10)
    if _, pErr := client.SendWrapped(mtc.distSenders[0], nil, &incArgs); pErr != nil {
        t.Fatal(pErr)
    }
    mtc.waitForValues(leftKey, []int64{0, 11, 11, 11, 0, 0})
    // Now wake the other stores up.
    mtc.restartStore(4)
    mtc.restartStore(5)
    // Write to the right range.
    incArgs = incrementArgs(rightKey, 20)
    if _, pErr := client.SendWrapped(mtc.distSenders[0], nil, &incArgs); pErr != nil {
        t.Fatal(pErr)
    }
    mtc.waitForValues(rightKey, []int64{0, 0, 0, 25, 25, 25})
}
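mtc.waitForValues, used above and in later examples, polls every store's engine until the value at the key matches the expected slice (one entry per store). A conceptual re-implementation, hedged: it is built only from calls that appear elsewhere on this page (engine.MVCCGet, mustGetInt) plus the repo's util.SucceedsSoon retry helper, and the real multiTestContext method differs in detail:

func waitForValuesSketch(t *testing.T, mtc *multiTestContext, key roachpb.Key, expected []int64) {
    util.SucceedsSoon(t, func() error {
        for i, eng := range mtc.engines {
            // Read the latest version of the key directly from each engine.
            val, _, err := engine.MVCCGet(context.Background(), eng, key, mtc.clock.Now(), true, nil)
            if err != nil {
                return err
            }
            if v := mustGetInt(val); v != expected[i] {
                return fmt.Errorf("engine %d: expected %d, got %d", i, expected[i], v)
            }
        }
        return nil
    })
}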
Example 3: TestMultiRangeScanWithMaxResults
// TestMultiRangeScanWithMaxResults tests that commands which access
// multiple ranges with a MaxResults parameter are carried out properly.
func TestMultiRangeScanWithMaxResults(t *testing.T) {
    defer leaktest.AfterTest(t)()
    testCases := []struct {
        splitKeys []roachpb.Key
        keys      []roachpb.Key
    }{
        {[]roachpb.Key{roachpb.Key("m")},
            []roachpb.Key{roachpb.Key("a"), roachpb.Key("z")}},
        {[]roachpb.Key{roachpb.Key("h"), roachpb.Key("q")},
            []roachpb.Key{roachpb.Key("b"), roachpb.Key("f"), roachpb.Key("k"),
                roachpb.Key("r"), roachpb.Key("w"), roachpb.Key("y")}},
    }
    for i, tc := range testCases {
        s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
        defer s.Stopper().Stop()
        ts := s.(*TestServer)
        retryOpts := base.DefaultRetryOptions()
        retryOpts.Closer = ts.stopper.ShouldDrain()
        ds := kv.NewDistSender(&kv.DistSenderContext{
            Clock:           s.Clock(),
            RPCContext:      s.RPCContext(),
            RPCRetryOptions: &retryOpts,
        }, ts.Gossip())
        tds := kv.NewTxnCoordSender(ds, ts.Clock(), ts.Ctx.Linearizable, tracing.NewTracer(),
            ts.stopper, kv.NewTxnMetrics(metric.NewRegistry()))
        for _, sk := range tc.splitKeys {
            if err := ts.node.ctx.DB.AdminSplit(sk); err != nil {
                t.Fatal(err)
            }
        }
        for _, k := range tc.keys {
            put := roachpb.NewPut(k, roachpb.MakeValueFromBytes(k))
            if _, err := client.SendWrapped(tds, nil, put); err != nil {
                t.Fatal(err)
            }
        }
        // Try every possible ScanRequest startKey.
        for start := 0; start < len(tc.keys); start++ {
            // Try every possible maxResults, from 1 to beyond the size of the key array.
            for maxResults := 1; maxResults <= len(tc.keys)-start+1; maxResults++ {
                scan := roachpb.NewScan(tc.keys[start], tc.keys[len(tc.keys)-1].Next(),
                    int64(maxResults))
                reply, err := client.SendWrapped(tds, nil, scan)
                if err != nil {
                    t.Fatal(err)
                }
                rows := reply.(*roachpb.ScanResponse).Rows
                if start+maxResults <= len(tc.keys) && len(rows) != maxResults {
                    t.Errorf("%d: start=%s: expected %d rows, but got %d", i, tc.keys[start], maxResults, len(rows))
                } else if start+maxResults == len(tc.keys)+1 && len(rows) != maxResults-1 {
                    t.Errorf("%d: expected %d rows, but got %d", i, maxResults-1, len(rows))
                }
            }
        }
    }
}
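One detail of Example 3 deserves a callout: roachpb.NewScan takes a half-open interval [start, end), so to make the last key reachable the test passes tc.keys[len(tc.keys)-1].Next() as the end key. The same pattern in isolation, as a fragment reusing the tds sender from the example:

// Scan from "a" up to and including "z", capped at 10 rows. Key.Next()
// appends a zero byte, yielding the tightest exclusive upper bound.
scan := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("z").Next(), 10)
reply, pErr := client.SendWrapped(tds, nil, scan)
if pErr != nil {
    t.Fatal(pErr)
}
rows := reply.(*roachpb.ScanResponse).Rows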
Example 4: TestStoreRangeMergeTwoEmptyRanges
// TestStoreRangeMergeTwoEmptyRanges tries to merge two empty ranges
// together.
func TestStoreRangeMergeTwoEmptyRanges(t *testing.T) {
    defer leaktest.AfterTest(t)()
    defer config.TestingDisableTableSplits()()
    store, stopper, _ := createTestStore(t)
    defer stopper.Stop()
    if _, _, err := createSplitRanges(store); err != nil {
        t.Fatal(err)
    }
    // Merge the b range back into the a range.
    args := adminMergeArgs(roachpb.KeyMin)
    _, err := client.SendWrapped(rg1(store), nil, &args)
    if err != nil {
        t.Fatal(err)
    }
    // Verify the merge by looking up keys from both ranges.
    rangeA := store.LookupReplica([]byte("a"), nil)
    rangeB := store.LookupReplica([]byte("c"), nil)
    if !reflect.DeepEqual(rangeA, rangeB) {
        t.Fatalf("ranges were not merged %+v != %+v", rangeA.Desc(), rangeB.Desc())
    }
}
Example 5: TestStoreRangeMergeTwoEmptyRanges
// TestStoreRangeMergeTwoEmptyRanges tries to merge two empty ranges together.
func TestStoreRangeMergeTwoEmptyRanges(t *testing.T) {
    defer leaktest.AfterTest(t)()
    sCtx := storage.TestStoreContext()
    sCtx.TestingKnobs.DisableSplitQueue = true
    store, stopper, _ := createTestStoreWithContext(t, sCtx)
    defer stopper.Stop()
    if _, _, err := createSplitRanges(store); err != nil {
        t.Fatal(err)
    }
    // Merge the b range back into the a range.
    args := adminMergeArgs(roachpb.KeyMin)
    _, err := client.SendWrapped(rg1(store), nil, &args)
    if err != nil {
        t.Fatal(err)
    }
    // Verify the merge by looking up keys from both ranges.
    replicaA := store.LookupReplica([]byte("a"), nil)
    replicaB := store.LookupReplica([]byte("c"), nil)
    if !reflect.DeepEqual(replicaA, replicaB) {
        t.Fatalf("ranges were not merged %s!=%s", replicaA, replicaB)
    }
}
Example 6: TestRejectFutureCommand
// TestRejectFutureCommand verifies that lease holders reject commands that
// would cause a large time jump.
func TestRejectFutureCommand(t *testing.T) {
    defer leaktest.AfterTest(t)()
    const maxOffset = 100 * time.Millisecond
    manual := hlc.NewManualClock(0)
    clock := hlc.NewClock(manual.UnixNano)
    clock.SetMaxOffset(maxOffset)
    mtc := multiTestContext{
        clock: clock,
    }
    mtc.Start(t, 1)
    defer mtc.Stop()
    // First do a write. The first write will advance the clock by MaxOffset
    // because of the read cache's low water mark.
    args := putArgs([]byte("b"), []byte("b"))
    if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &args); err != nil {
        t.Fatal(err)
    }
    if now := clock.Now(); now.WallTime != int64(maxOffset) {
        t.Fatalf("expected clock to advance to 100ms; got %s", now)
    }
    // The logical clock has advanced past the physical clock; increment
    // the "physical" clock to catch up.
    manual.Increment(int64(maxOffset))
    startTime := manual.UnixNano()
    // Commands with a future timestamp that is within the MaxOffset
    // bound will be accepted and will cause the clock to advance.
    for i := int64(0); i < 3; i++ {
        incArgs := incrementArgs([]byte("a"), 5)
        ts := hlc.ZeroTimestamp.Add(startTime+((i+1)*30)*int64(time.Millisecond), 0)
        if _, err := client.SendWrappedWith(rg1(mtc.stores[0]), nil, roachpb.Header{Timestamp: ts}, &incArgs); err != nil {
            t.Fatal(err)
        }
    }
    if now := clock.Now(); now.WallTime != int64(190*time.Millisecond) {
        t.Fatalf("expected clock to advance to 190ms; got %s", now)
    }
    // Once the accumulated offset reaches MaxOffset, commands will be rejected.
    incArgs := incrementArgs([]byte("a"), 11)
    ts := hlc.ZeroTimestamp.Add(int64((time.Duration(startTime)+maxOffset+1)*time.Millisecond), 0)
    if _, err := client.SendWrappedWith(rg1(mtc.stores[0]), nil, roachpb.Header{Timestamp: ts}, &incArgs); err == nil {
        t.Fatalf("expected clock offset error but got nil")
    }
    // The clock remained at 190ms and the final command was not executed.
    if now := clock.Now(); now.WallTime != int64(190*time.Millisecond) {
        t.Errorf("expected clock to remain at 190ms; got %s", now)
    }
    val, _, err := engine.MVCCGet(context.Background(), mtc.engines[0], roachpb.Key("a"), clock.Now(), true, nil)
    if err != nil {
        t.Fatal(err)
    }
    if v := mustGetInt(val); v != 15 {
        t.Errorf("expected 15, got %v", v)
    }
}
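Example 6 introduces the SendWrappedWith variant, which takes an explicit roachpb.Header so the caller can set fields that plain SendWrapped leaves at their defaults, most notably the request timestamp. A minimal hedged sketch of the same call shape (the one-second timestamp is illustrative):

// Send an increment at a caller-chosen HLC timestamp rather than letting
// the lease holder assign one from its own clock.
incArgs := incrementArgs([]byte("a"), 1)
h := roachpb.Header{Timestamp: hlc.ZeroTimestamp.Add(time.Second.Nanoseconds(), 0)}
if _, pErr := client.SendWrappedWith(rg1(mtc.stores[0]), nil, h, &incArgs); pErr != nil {
    t.Fatal(pErr)
}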
Example 7: TestSplitSnapshotRace_SnapshotWins
// TestSplitSnapshotRace_SnapshotWins exercises one outcome of the
// split/snapshot race: The right side of the split replicates first,
// so the target node sees a raft snapshot before it has processed the
// split and thus still has a conflicting range.
func TestSplitSnapshotRace_SnapshotWins(t *testing.T) {
    defer leaktest.AfterTest(t)()
    mtc, leftKey, rightKey := setupSplitSnapshotRace(t)
    defer mtc.Stop()
    // Bring the right range up first.
    for i := 3; i <= 5; i++ {
        mtc.restartStore(i)
    }
    // Perform a write on the right range.
    incArgs := incrementArgs(rightKey, 20)
    if _, pErr := client.SendWrapped(mtc.distSenders[0], nil, &incArgs); pErr != nil {
        t.Fatal(pErr)
    }
    // It immediately propagates between nodes 4 and 5, but node 3
    // remains at its old value. It can't accept the right-hand range
    // because it conflicts with its not-yet-split copy of the left-hand
    // range. This test is not completely deterministic: we want to make
    // sure that node 3 doesn't panic when it receives the snapshot, but
    // since it silently drops the message there is nothing we can wait
    // for. There is a high probability that the message will have been
    // received by the time that nodes 4 and 5 have processed their
    // update.
    mtc.waitForValues(rightKey, []int64{0, 0, 0, 2, 25, 25})
    // Wake up the left-hand range. This will allow the left-hand
    // range's split to complete and unblock the right-hand range.
    mtc.restartStore(1)
    mtc.restartStore(2)
    // Perform writes on both sides. This is not strictly necessary but
    // it helps wake up dormant ranges that would otherwise have to wait
    // for retry timeouts.
    incArgs = incrementArgs(leftKey, 10)
    if _, pErr := client.SendWrapped(mtc.distSenders[0], nil, &incArgs); pErr != nil {
        t.Fatal(pErr)
    }
    mtc.waitForValues(leftKey, []int64{0, 11, 11, 11, 0, 0})
    incArgs = incrementArgs(rightKey, 200)
    if _, pErr := client.SendWrapped(mtc.distSenders[0], nil, &incArgs); pErr != nil {
        t.Fatal(pErr)
    }
    mtc.waitForValues(rightKey, []int64{0, 0, 0, 225, 225, 225})
}
Example 8: TestStoreRangeMergeNonCollocated
// TestStoreRangeMergeNonCollocated attempts to merge two ranges
// that are not on the same stores.
func TestStoreRangeMergeNonCollocated(t *testing.T) {
    defer leaktest.AfterTest(t)()
    mtc := startMultiTestContext(t, 4)
    defer mtc.Stop()
    store := mtc.stores[0]
    // Split into 3 ranges.
    argsSplit := adminSplitArgs(roachpb.KeyMin, []byte("d"))
    if _, pErr := client.SendWrapped(rg1(store), nil, &argsSplit); pErr != nil {
        t.Fatalf("can't split range: %s", pErr)
    }
    argsSplit = adminSplitArgs(roachpb.KeyMin, []byte("b"))
    if _, pErr := client.SendWrapped(rg1(store), nil, &argsSplit); pErr != nil {
        t.Fatalf("can't split range: %s", pErr)
    }
    rangeA := store.LookupReplica([]byte("a"), nil)
    rangeADesc := rangeA.Desc()
    rangeB := store.LookupReplica([]byte("c"), nil)
    rangeBDesc := rangeB.Desc()
    rangeC := store.LookupReplica([]byte("e"), nil)
    rangeCDesc := rangeC.Desc()
    if bytes.Equal(rangeADesc.StartKey, rangeBDesc.StartKey) {
        log.Errorf(context.TODO(), "split range start keys are equal: %q == %q", rangeADesc.StartKey, rangeBDesc.StartKey)
    }
    if bytes.Equal(rangeBDesc.StartKey, rangeCDesc.StartKey) {
        log.Errorf(context.TODO(), "split range start keys are equal: %q == %q", rangeBDesc.StartKey, rangeCDesc.StartKey)
    }
    if bytes.Equal(rangeADesc.StartKey, rangeCDesc.StartKey) {
        log.Errorf(context.TODO(), "split range start keys are equal: %q == %q", rangeADesc.StartKey, rangeCDesc.StartKey)
    }
    // Replicate the ranges to different sets of stores. Ranges A and C
    // are collocated, but B is different.
    mtc.replicateRange(rangeA.RangeID, 1, 2)
    mtc.replicateRange(rangeB.RangeID, 1, 3)
    mtc.replicateRange(rangeC.RangeID, 1, 2)
    // Attempt to merge.
    rangeADesc = rangeA.Desc()
    argsMerge := adminMergeArgs(roachpb.Key(rangeADesc.StartKey))
    if _, pErr := rangeA.AdminMerge(context.Background(), argsMerge, rangeADesc); !testutils.IsPError(pErr, "ranges not collocated") {
        t.Fatalf("did not get expected error; got %s", pErr)
    }
}
Example 9: TestStoreRangeMergeStats
// TestStoreRangeMergeStats starts by splitting a range, then writing random data
// to both sides of the split. It then merges the ranges and verifies the merged
// range has stats consistent with recomputations.
func TestStoreRangeMergeStats(t *testing.T) {
    defer leaktest.AfterTest(t)()
    sCtx := storage.TestStoreContext()
    sCtx.TestingKnobs.DisableSplitQueue = true
    store, stopper, manual := createTestStoreWithContext(t, sCtx)
    defer stopper.Stop()
    // Split the range.
    aDesc, bDesc, err := createSplitRanges(store)
    if err != nil {
        t.Fatal(err)
    }
    // Write some values left and right of the split key.
    writeRandomDataToRange(t, store, aDesc.RangeID, []byte("aaa"))
    writeRandomDataToRange(t, store, bDesc.RangeID, []byte("ccc"))
    // Get the range stats for both ranges now that we have data.
    var msA, msB enginepb.MVCCStats
    snap := store.Engine().NewSnapshot()
    defer snap.Close()
    if err := engine.MVCCGetRangeStats(context.Background(), snap, aDesc.RangeID, &msA); err != nil {
        t.Fatal(err)
    }
    if err := engine.MVCCGetRangeStats(context.Background(), snap, bDesc.RangeID, &msB); err != nil {
        t.Fatal(err)
    }
    // Stats should agree with recomputation.
    if err := verifyRecomputedStats(snap, aDesc, msA, manual.UnixNano()); err != nil {
        t.Fatalf("failed to verify range A's stats before merge: %v", err)
    }
    if err := verifyRecomputedStats(snap, bDesc, msB, manual.UnixNano()); err != nil {
        t.Fatalf("failed to verify range B's stats before merge: %v", err)
    }
    manual.Increment(100)
    // Merge the b range back into the a range.
    args := adminMergeArgs(roachpb.KeyMin)
    if _, err := client.SendWrapped(rg1(store), nil, &args); err != nil {
        t.Fatal(err)
    }
    rngMerged := store.LookupReplica(aDesc.StartKey, nil)
    // Get the range stats for the merged range and verify.
    snap = store.Engine().NewSnapshot()
    defer snap.Close()
    var msMerged enginepb.MVCCStats
    if err := engine.MVCCGetRangeStats(context.Background(), snap, rngMerged.RangeID, &msMerged); err != nil {
        t.Fatal(err)
    }
    // Merged stats should agree with recomputation.
    if err := verifyRecomputedStats(snap, rngMerged.Desc(), msMerged, manual.UnixNano()); err != nil {
        t.Errorf("failed to verify range's stats after merge: %v", err)
    }
}
Example 10: TestStoreRangeSplitAtRangeBounds
// TestStoreRangeSplitAtRangeBounds verifies a range cannot be split
// at its start or end keys (which would create a zero-length range).
// This sort of thing might happen in the wild if two split requests
// arrived for the same key. The first one succeeds and the second
// would try to split at the start of the newly split range.
func TestStoreRangeSplitAtRangeBounds(t *testing.T) {
    defer leaktest.AfterTest(t)()
    defer config.TestingDisableTableSplits()()
    store, stopper, _ := createTestStore(t)
    defer stopper.Stop()
    args := adminSplitArgs(roachpb.KeyMin, []byte("a"))
    if _, err := client.SendWrapped(rg1(store), nil, &args); err != nil {
        t.Fatal(err)
    }
    // This second split will try to split at the end of the first range.
    if _, err := client.SendWrapped(rg1(store), nil, &args); err == nil {
        t.Fatalf("split succeeded unexpectedly")
    }
    // Now try to split at the start of the new range.
    args = adminSplitArgs([]byte("a"), []byte("a"))
    if _, err := client.SendWrapped(rg1(store), nil, &args); err == nil {
        t.Fatalf("split succeeded unexpectedly")
    }
}
Example 11: TestStoreRangeMergeLastRange
// TestStoreRangeMergeLastRange verifies that merging the last range
// fails.
func TestStoreRangeMergeLastRange(t *testing.T) {
    defer leaktest.AfterTest(t)()
    defer config.TestingDisableTableSplits()()
    store, stopper, _ := createTestStore(t)
    defer stopper.Stop()
    // Merge last range.
    args := adminMergeArgs(roachpb.KeyMin)
    if _, pErr := client.SendWrapped(rg1(store), nil, &args); !testutils.IsPError(pErr, "cannot merge final range") {
        t.Fatalf("expected 'cannot merge final range' error; got %s", pErr)
    }
}
Example 12: BenchmarkStoreRangeSplit
func BenchmarkStoreRangeSplit(b *testing.B) {
    defer tracing.Disable()()
    sCtx := storage.TestStoreContext()
    sCtx.TestingKnobs.DisableSplitQueue = true
    store, stopper, _ := createTestStoreWithContext(b, sCtx)
    defer stopper.Stop()
    // Perform initial split of ranges.
    sArgs := adminSplitArgs(roachpb.KeyMin, []byte("b"))
    if _, err := client.SendWrapped(rg1(store), nil, &sArgs); err != nil {
        b.Fatal(err)
    }
    // Write some values left and right of the split key.
    aDesc := store.LookupReplica([]byte("a"), nil).Desc()
    bDesc := store.LookupReplica([]byte("c"), nil).Desc()
    writeRandomDataToRange(b, store, aDesc.RangeID, []byte("aaa"))
    writeRandomDataToRange(b, store, bDesc.RangeID, []byte("ccc"))
    // Merge the b range back into the a range.
    mArgs := adminMergeArgs(roachpb.KeyMin)
    if _, err := client.SendWrapped(rg1(store), nil, &mArgs); err != nil {
        b.Fatal(err)
    }
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        // Split the range.
        b.StartTimer()
        if _, err := client.SendWrapped(rg1(store), nil, &sArgs); err != nil {
            b.Fatal(err)
        }
        // Merge the ranges.
        b.StopTimer()
        if _, err := client.SendWrapped(rg1(store), nil, &mArgs); err != nil {
            b.Fatal(err)
        }
    }
}
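The timing discipline in Example 12 is the part worth copying: b.ResetTimer() discards setup cost, and each iteration brackets only the split with StartTimer/StopTimer so the merge that restores state for the next iteration is excluded from the measurement. The same skeleton in a generic benchmark, using only the standard testing API:

func BenchmarkWithUntimedCleanup(b *testing.B) {
    // ...expensive setup, excluded by the ResetTimer below...
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        b.StartTimer()
        // timed: the operation being benchmarked
        b.StopTimer()
        // untimed: cleanup that restores state for the next iteration
    }
}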
Example 13: TestStoreRangeMergeLastRange
// TestStoreRangeMergeLastRange verifies that merging the last range
// fails.
func TestStoreRangeMergeLastRange(t *testing.T) {
    defer leaktest.AfterTest(t)()
    sCtx := storage.TestStoreContext()
    sCtx.TestingKnobs.DisableSplitQueue = true
    store, stopper, _ := createTestStoreWithContext(t, sCtx)
    defer stopper.Stop()
    // Merge last range.
    args := adminMergeArgs(roachpb.KeyMin)
    if _, pErr := client.SendWrapped(rg1(store), nil, &args); !testutils.IsPError(pErr, "cannot merge final range") {
        t.Fatalf("expected 'cannot merge final range' error; got %s", pErr)
    }
}
Example 14: TestStoreRangeSplitInsideRow
// TestStoreRangeSplitInsideRow verifies that an attempt to split a range
// inside a table row will cause a split at a boundary between rows.
func TestStoreRangeSplitInsideRow(t *testing.T) {
    defer leaktest.AfterTest(t)()
    sCtx := storage.TestStoreContext()
    sCtx.TestingKnobs.DisableSplitQueue = true
    store, stopper, _ := createTestStoreWithContext(t, sCtx)
    defer stopper.Stop()
    // Manually create some of the column keys corresponding to the table:
    //
    //   CREATE TABLE t (id STRING PRIMARY KEY, col1 INT, col2 INT)
    tableKey := keys.MakeTablePrefix(keys.MaxReservedDescID + 1)
    rowKey := roachpb.Key(encoding.EncodeVarintAscending(append([]byte(nil), tableKey...), 1))
    rowKey = encoding.EncodeStringAscending(encoding.EncodeVarintAscending(rowKey, 1), "a")
    col1Key := keys.MakeFamilyKey(append([]byte(nil), rowKey...), 1)
    col2Key := keys.MakeFamilyKey(append([]byte(nil), rowKey...), 2)
    // We don't care about the value, so just store any old thing.
    if err := store.DB().Put(col1Key, "column 1"); err != nil {
        t.Fatal(err)
    }
    if err := store.DB().Put(col2Key, "column 2"); err != nil {
        t.Fatal(err)
    }
    // Split between col1Key and col2Key by splitting before col2Key.
    args := adminSplitArgs(col2Key, col2Key)
    _, err := client.SendWrapped(rg1(store), nil, &args)
    if err != nil {
        t.Fatalf("%s: unexpected split error: %s", col1Key, err)
    }
    rng1 := store.LookupReplica(col1Key, nil)
    rng2 := store.LookupReplica(col2Key, nil)
    // Verify the two columns are still on the same range.
    if !reflect.DeepEqual(rng1, rng2) {
        t.Fatalf("%s: ranges differ: %+v vs %+v", roachpb.Key(col1Key), rng1, rng2)
    }
    // Verify we split on a row key.
    if startKey := rng1.Desc().StartKey; !startKey.Equal(rowKey) {
        t.Fatalf("%s: expected split on %s, but found %s",
            roachpb.Key(col1Key), roachpb.Key(rowKey), startKey)
    }
    // Verify the previous range was split on a row key.
    rng3 := store.LookupReplica(tableKey, nil)
    if endKey := rng3.Desc().EndKey; !endKey.Equal(rowKey) {
        t.Fatalf("%s: expected split on %s, but found %s",
            roachpb.Key(col1Key), roachpb.Key(rowKey), endKey)
    }
}
Example 15: TestStoreRangeSplitAtTablePrefix
// TestStoreRangeSplitAtTablePrefix verifies a range can be split at
// UserTableDataMin and still gossip the SystemConfig properly.
func TestStoreRangeSplitAtTablePrefix(t *testing.T) {
    defer leaktest.AfterTest(t)()
    sCtx := storage.TestStoreContext()
    sCtx.TestingKnobs.DisableSplitQueue = true
    store, stopper, _ := createTestStoreWithContext(t, sCtx)
    defer stopper.Stop()
    key := keys.MakeRowSentinelKey(append([]byte(nil), keys.UserTableDataMin...))
    args := adminSplitArgs(key, key)
    if _, pErr := client.SendWrapped(rg1(store), nil, &args); pErr != nil {
        t.Fatalf("%q: unexpected split error: %s", key, pErr)
    }
    var desc sqlbase.TableDescriptor
    descBytes, err := protoutil.Marshal(&desc)
    if err != nil {
        t.Fatal(err)
    }
    // Update SystemConfig to trigger gossip.
    if err := store.DB().Txn(func(txn *client.Txn) error {
        txn.SetSystemConfigTrigger()
        // We don't care about the values, just the keys.
        k := sqlbase.MakeDescMetadataKey(sqlbase.ID(keys.MaxReservedDescID + 1))
        return txn.Put(k, &desc)
    }); err != nil {
        t.Fatal(err)
    }
    successChan := make(chan struct{}, 1)
    store.Gossip().RegisterCallback(gossip.KeySystemConfig, func(_ string, content roachpb.Value) {
        contentBytes, err := content.GetBytes()
        if err != nil {
            t.Fatal(err)
        }
        if bytes.Contains(contentBytes, descBytes) {
            select {
            case successChan <- struct{}{}:
            default:
            }
        }
    })
    select {
    case <-time.After(time.Second):
        t.Errorf("expected a schema gossip containing %q, but did not see one", descBytes)
    case <-successChan:
    }
}
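A design note on Example 15's gossip callback: the callback may fire many times, so it signals success with a non-blocking send into a buffered channel; any firing after the first hits the default case instead of blocking the gossip goroutine. The idiom in isolation:

done := make(chan struct{}, 1)
notify := func() {
    select {
    case done <- struct{}{}: // first signal wins
    default: // already signaled; never block the caller
    }
}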