This article collects typical usage examples of the SendWrapped function from the Golang package github.com/cockroachdb/cockroach/client. If you are unsure what SendWrapped does or how to call it, the selected examples below should help.
Fifteen code examples of SendWrapped follow, all drawn from CockroachDB's own test code.
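All fifteen examples share the same calling pattern: build a single roachpb.Request, hand it to client.SendWrapped together with a client.Sender (a store, a DistSender, or a TxnCoordSender), and type-assert the unwrapped response. The sketch below distills that pattern; sendIncrement is a hypothetical helper, not part of the CockroachDB test suite, and it assumes the same API vintage as the examples, where SendWrapped takes a sender, an optional header, and a request and returns the response plus an error.

// A minimal sketch of the SendWrapped pattern, assuming the API vintage used
// in the examples below (imports: "github.com/cockroachdb/cockroach/client"
// and "github.com/cockroachdb/cockroach/roachpb"). sendIncrement is a
// hypothetical helper, not part of the CockroachDB code base.
func sendIncrement(sender client.Sender, key roachpb.Key, inc int64) (int64, error) {
	// Build a single request; SendWrapped wraps it in a batch internally.
	args := roachpb.IncrementRequest{
		RequestHeader: roachpb.RequestHeader{Key: key},
		Increment:     inc,
	}
	// nil header: no transaction or special read consistency.
	reply, err := client.SendWrapped(sender, nil, &args)
	if err != nil {
		return 0, err
	}
	// The reply comes back as a generic roachpb.Response; assert the concrete type.
	return reply.(*roachpb.IncrementResponse).NewValue, nil
}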
Example 1: TestSplitSnapshotRace_SplitWins
// TestSplitSnapshotRace_SplitWins exercises one outcome of the
// split/snapshot race: The left side of the split propagates first,
// so the split completes before it sees a competing snapshot. This is
// the more common outcome in practice.
func TestSplitSnapshotRace_SplitWins(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc, leftKey, rightKey := setupSplitSnapshotRace(t)
	defer mtc.Stop()

	// Bring the left range up first so that the split happens before it sees a snapshot.
	for i := 1; i <= 3; i++ {
		mtc.restartStore(i)
	}

	// Perform a write on the left range and wait for it to propagate.
	incArgs := incrementArgs(leftKey, 10)
	if _, err := client.SendWrapped(mtc.distSender, nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	mtc.waitForValues(leftKey, 3*time.Second, []int64{0, 11, 11, 11, 0, 0})

	// Now wake the other stores up.
	mtc.restartStore(4)
	mtc.restartStore(5)

	// Write to the right range.
	incArgs = incrementArgs(rightKey, 20)
	if _, err := client.SendWrapped(mtc.distSender, nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	mtc.waitForValues(rightKey, 3*time.Second, []int64{0, 0, 0, 22, 22, 22})
}
Example 2: TestLeaderAfterSplit
// TestLeaderAfterSplit verifies that a raft group created by a split
// elects a leader without waiting for an election timeout.
func TestLeaderAfterSplit(t *testing.T) {
	defer leaktest.AfterTest(t)
	storeContext := storage.TestStoreContext
	storeContext.RaftElectionTimeoutTicks = 1000000
	mtc := &multiTestContext{
		storeContext: &storeContext,
	}
	mtc.Start(t, 3)
	defer mtc.Stop()

	mtc.replicateRange(1, 0, 1, 2)

	leftKey := roachpb.Key("a")
	splitKey := roachpb.Key("m")
	rightKey := roachpb.Key("z")

	splitArgs := adminSplitArgs(roachpb.KeyMin, splitKey)
	if _, err := client.SendWrapped(mtc.distSender, nil, &splitArgs); err != nil {
		t.Fatal(err)
	}

	incArgs := incrementArgs(leftKey, 1)
	if _, err := client.SendWrapped(mtc.distSender, nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	incArgs = incrementArgs(rightKey, 2)
	if _, err := client.SendWrapped(mtc.distSender, nil, &incArgs); err != nil {
		t.Fatal(err)
	}
}
Example 3: TestStoreResolveWriteIntentRollback
// TestStoreResolveWriteIntentRollback verifies that resolving a write
// intent by aborting it yields the previous value.
func TestStoreResolveWriteIntentRollback(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, _, stopper := createTestStore(t)
	defer stopper.Stop()

	key := roachpb.Key("a")
	pusher := newTransaction("test", key, 1, roachpb.SERIALIZABLE, store.ctx.Clock)
	pushee := newTransaction("test", key, 1, roachpb.SERIALIZABLE, store.ctx.Clock)
	pushee.Priority = 1
	pusher.Priority = 2 // Pusher will win.

	// First lay down intent using the pushee's txn.
	args := incrementArgs(key, 1, 1, store.StoreID())
	args.Txn = pushee
	if _, err := client.SendWrapped(store, nil, &args); err != nil {
		t.Fatal(err)
	}

	// Now, try a put using the pusher's txn.
	args.Txn = pusher
	args.Increment = 2
	if resp, err := client.SendWrapped(store, nil, &args); err != nil {
		t.Errorf("expected increment to succeed: %s", err)
	} else if reply := resp.(*roachpb.IncrementResponse); reply.NewValue != 2 {
		t.Errorf("expected rollback of earlier increment to yield increment value of 2; got %d", reply.NewValue)
	}
}
Example 4: TestMultiRangeScanWithMaxResults
// TestMultiRangeScanWithMaxResults tests that commands spanning multiple
// ranges respect the MaxResults parameter.
func TestMultiRangeScanWithMaxResults(t *testing.T) {
	defer leaktest.AfterTest(t)()
	testCases := []struct {
		splitKeys []roachpb.Key
		keys      []roachpb.Key
	}{
		{[]roachpb.Key{roachpb.Key("m")},
			[]roachpb.Key{roachpb.Key("a"), roachpb.Key("z")}},
		{[]roachpb.Key{roachpb.Key("h"), roachpb.Key("q")},
			[]roachpb.Key{roachpb.Key("b"), roachpb.Key("f"), roachpb.Key("k"),
				roachpb.Key("r"), roachpb.Key("w"), roachpb.Key("y")}},
	}

	for i, tc := range testCases {
		s := StartTestServer(t)
		defer s.Stop()
		retryOpts := kv.GetDefaultDistSenderRetryOptions()
		retryOpts.Closer = s.stopper.ShouldDrain()
		ds := kv.NewDistSender(&kv.DistSenderContext{
			Clock:           s.Clock(),
			RPCContext:      s.RPCContext(),
			RPCRetryOptions: &retryOpts,
		}, s.Gossip())
		tds := kv.NewTxnCoordSender(ds, s.Clock(), testContext.Linearizable, tracing.NewTracer(),
			s.stopper, kv.NewTxnMetrics(metric.NewRegistry()))

		for _, sk := range tc.splitKeys {
			if err := s.node.ctx.DB.AdminSplit(sk); err != nil {
				t.Fatal(err)
			}
		}

		for _, k := range tc.keys {
			put := roachpb.NewPut(k, roachpb.MakeValueFromBytes(k))
			if _, err := client.SendWrapped(tds, nil, put); err != nil {
				t.Fatal(err)
			}
		}

		// Try every possible ScanRequest startKey.
		for start := 0; start < len(tc.keys); start++ {
			// Try every possible maxResults, from 1 to beyond the size of key array.
			for maxResults := 1; maxResults <= len(tc.keys)-start+1; maxResults++ {
				scan := roachpb.NewScan(tc.keys[start], tc.keys[len(tc.keys)-1].Next(),
					int64(maxResults))
				reply, err := client.SendWrapped(tds, nil, scan)
				if err != nil {
					t.Fatal(err)
				}
				rows := reply.(*roachpb.ScanResponse).Rows
				if start+maxResults <= len(tc.keys) && len(rows) != maxResults {
					t.Errorf("%d: start=%s: expected %d rows, but got %d", i, tc.keys[start], maxResults, len(rows))
				} else if start+maxResults == len(tc.keys)+1 && len(rows) != maxResults-1 {
					t.Errorf("%d: expected %d rows, but got %d", i, maxResults-1, len(rows))
				}
			}
		}
	}
}
Example 5: TestReplicateRange
// TestReplicateRange verifies basic replication functionality by creating two stores
// and a range, replicating the range to the second store, and reading its data there.
func TestReplicateRange(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()

	// Issue a command on the first node before replicating.
	incArgs := incrementArgs([]byte("a"), 5, 1, mtc.stores[0].StoreID())
	if _, err := client.SendWrapped(mtc.stores[0], nil, &incArgs); err != nil {
		t.Fatal(err)
	}

	rng, err := mtc.stores[0].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}

	if err := rng.ChangeReplicas(roachpb.ADD_REPLICA,
		roachpb.ReplicaDescriptor{
			NodeID:  mtc.stores[1].Ident.NodeID,
			StoreID: mtc.stores[1].Ident.StoreID,
		}, rng.Desc()); err != nil {
		t.Fatal(err)
	}

	// Verify no intent remains on range descriptor key.
	key := keys.RangeDescriptorKey(rng.Desc().StartKey)
	desc := roachpb.RangeDescriptor{}
	if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &desc); !ok || err != nil {
		t.Fatalf("fetching range descriptor yielded %t, %s", ok, err)
	}

	// Verify that in time, no intents remain on meta addressing
	// keys, and that range descriptor on the meta records is correct.
	util.SucceedsWithin(t, 1*time.Second, func() error {
		meta2 := keys.RangeMetaKey(roachpb.KeyMax)
		meta1 := keys.RangeMetaKey(meta2)
		for _, key := range []roachpb.Key{meta2, meta1} {
			metaDesc := roachpb.RangeDescriptor{}
			if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &metaDesc); !ok || err != nil {
				return util.Errorf("failed to resolve %s", key)
			}
			if !reflect.DeepEqual(metaDesc, desc) {
				return util.Errorf("descs not equal: %+v != %+v", metaDesc, desc)
			}
		}
		return nil
	})

	// Verify that the same data is available on the replica.
	util.SucceedsWithin(t, 1*time.Second, func() error {
		getArgs := getArgs([]byte("a"), 1, mtc.stores[1].StoreID())
		getArgs.ReadConsistency = roachpb.INCONSISTENT
		if reply, err := client.SendWrapped(mtc.stores[1], nil, &getArgs); err != nil {
			return util.Errorf("failed to read data")
		} else if v := mustGetInt(reply.(*roachpb.GetResponse).Value); v != 5 {
			return util.Errorf("failed to read correct data: %d", v)
		}
		return nil
	})
}
Example 6: TestMultiRangeScanReverseScanInconsistent
// TestMultiRangeScanReverseScanInconsistent verifies that a Scan/ReverseScan
// across ranges that doesn't require read consistency will set a timestamp
// using the clock local to the distributed sender.
func TestMultiRangeScanReverseScanInconsistent(t *testing.T) {
	defer leaktest.AfterTest(t)
	s, db := setupMultipleRanges(t, "b")
	defer s.Stop()

	// Write keys "a" and "b", the latter of which is the first key in the
	// second range.
	keys := []string{"a", "b"}
	ts := []time.Time{}
	for i, key := range keys {
		b := &client.Batch{}
		b.Put(key, "value")
		if err := db.Run(b); err != nil {
			t.Fatal(err)
		}
		ts = append(ts, b.Results[0].Rows[0].Timestamp())
		log.Infof("%d: %s", i, b.Results[0].Rows[0].Timestamp())
	}

	// Do an inconsistent Scan/ReverseScan from a new DistSender and verify
	// it does the read at its local clock and doesn't receive an
	// OpRequiresTxnError. The local clock is set just below the timestamp
	// of the second key, so the read sees only key "a".
	manual := hlc.NewManualClock(ts[1].UnixNano() - 1)
	clock := hlc.NewClock(manual.UnixNano)
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: clock}, s.Gossip())

	// Scan.
	sa := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("c"), 0).(*roachpb.ScanRequest)
	sa.ReadConsistency = roachpb.INCONSISTENT
	reply, err := client.SendWrapped(ds, nil, sa)
	if err != nil {
		t.Fatal(err)
	}
	sr := reply.(*roachpb.ScanResponse)
	if l := len(sr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(sr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}

	// ReverseScan.
	rsa := roachpb.NewReverseScan(roachpb.Key("a"), roachpb.Key("c"), 0).(*roachpb.ReverseScanRequest)
	rsa.ReadConsistency = roachpb.INCONSISTENT
	reply, err = client.SendWrapped(ds, nil, rsa)
	if err != nil {
		t.Fatal(err)
	}
	rsr := reply.(*roachpb.ReverseScanResponse)
	if l := len(rsr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(rsr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}
}
Example 7: TestStoreRecoverWithErrors
// TestStoreRecoverWithErrors verifies that even commands that fail are marked as
// applied so they are not retried after recovery.
func TestStoreRecoverWithErrors(t *testing.T) {
	defer leaktest.AfterTest(t)
	defer func() { storage.TestingCommandFilter = nil }()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	engineStopper := stop.NewStopper()
	defer engineStopper.Stop()
	eng := engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)
	numIncrements := 0

	storage.TestingCommandFilter = func(_ roachpb.StoreID, args roachpb.Request, _ roachpb.Header) error {
		if _, ok := args.(*roachpb.IncrementRequest); ok && args.Header().Key.Equal(roachpb.Key("a")) {
			numIncrements++
		}
		return nil
	}

	func() {
		stopper := stop.NewStopper()
		defer stopper.Stop()
		store := createTestStoreWithEngine(t, eng, clock, true, nil, stopper)

		// Write a bytes value so the increment will fail.
		putArgs := putArgs(roachpb.Key("a"), []byte("asdf"))
		if _, err := client.SendWrapped(rg1(store), nil, &putArgs); err != nil {
			t.Fatal(err)
		}

		// Try and fail to increment the key. It is important for this test that the
		// failure be the last thing in the raft log when the store is stopped.
		incArgs := incrementArgs(roachpb.Key("a"), 42)
		if _, err := client.SendWrapped(rg1(store), nil, &incArgs); err == nil {
			t.Fatal("did not get expected error")
		}
	}()

	if numIncrements != 1 {
		t.Fatalf("expected 1 increment; was %d", numIncrements)
	}

	// Recover from the engine.
	store := createTestStoreWithEngine(t, eng, clock, false, nil, engineStopper)

	// Issue a no-op write to lazily initialize raft on the range.
	incArgs := incrementArgs(roachpb.Key("b"), 0)
	if _, err := client.SendWrapped(rg1(store), nil, &incArgs); err != nil {
		t.Fatal(err)
	}

	// No additional increments were performed on key A during recovery.
	if numIncrements != 1 {
		t.Fatalf("expected 1 increment; was %d", numIncrements)
	}
}
Example 8: TestStoreResolveWriteIntent
// TestStoreResolveWriteIntent adds a write intent and then verifies
// that a put returns success and aborts the intent's txn in the event the
// pushee has lower priority. Otherwise, verifies that a
// TransactionPushError is returned.
func TestStoreResolveWriteIntent(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, _, stopper := createTestStore(t)
	defer stopper.Stop()

	for i, resolvable := range []bool{true, false} {
		key := roachpb.Key(fmt.Sprintf("key-%d", i))
		pusher := newTransaction("test", key, 1, roachpb.SERIALIZABLE, store.ctx.Clock)
		pushee := newTransaction("test", key, 1, roachpb.SERIALIZABLE, store.ctx.Clock)
		if resolvable {
			pushee.Priority = 1
			pusher.Priority = 2 // Pusher will win.
		} else {
			pushee.Priority = 2
			pusher.Priority = 1 // Pusher will lose.
		}

		// First lay down intent using the pushee's txn.
		pArgs := putArgs(key, []byte("value"), 1, store.StoreID())
		pArgs.Txn = pushee
		if _, err := client.SendWrapped(store, nil, &pArgs); err != nil {
			t.Fatal(err)
		}

		// Now, try a put using the pusher's txn.
		pArgs.Txn = pusher
		_, err := client.SendWrapped(store, nil, &pArgs)
		if resolvable {
			if err != nil {
				t.Errorf("expected intent resolved; got unexpected error: %s", err)
			}
			txnKey := keys.TransactionKey(pushee.Key, pushee.ID)
			var txn roachpb.Transaction
			ok, err := engine.MVCCGetProto(store.Engine(), txnKey, roachpb.ZeroTimestamp, true, nil, &txn)
			if !ok || err != nil {
				t.Fatalf("not found or err: %s", err)
			}
			if txn.Status != roachpb.ABORTED {
				t.Errorf("expected pushee to be aborted; got %s", txn.Status)
			}
		} else {
			if rErr, ok := err.(*roachpb.TransactionPushError); !ok {
				t.Errorf("expected txn push error; got %s", err)
			} else if !bytes.Equal(rErr.PusheeTxn.ID, pushee.ID) {
				t.Errorf("expected txn to match pushee %q; got %s", pushee.ID, rErr)
			}
			// Trying again should fail again.
			if _, err := client.SendWrapped(store, nil, &pArgs); err == nil {
				t.Errorf("expected another error on latent write intent but succeeded")
			}
		}
	}
}
Example 9: TestStoreScanInconsistentResolvesIntents
// TestStoreScanInconsistentResolvesIntents lays down 10 intents,
// commits the txn without resolving intents, then does repeated
// inconsistent reads until the data shows up, showing that the
// inconsistent reads are triggering intent resolution.
func TestStoreScanInconsistentResolvesIntents(t *testing.T) {
	defer leaktest.AfterTest(t)
	// This test relies on having a committed Txn record and open intents on
	// the same Range. This only works with auto-gc turned off; alternatively
	// the test could move to splitting its underlying Range.
	defer setTxnAutoGC(false)()
	var intercept atomic.Value
	intercept.Store(true)
	TestingCommandFilter = func(args roachpb.Request) error {
		if _, ok := args.(*roachpb.ResolveIntentRequest); ok && intercept.Load().(bool) {
			return util.Errorf("error on purpose")
		}
		return nil
	}
	store, _, stopper := createTestStore(t)
	defer func() { TestingCommandFilter = nil }()
	defer stopper.Stop()

	// Lay down 10 intents to scan over.
	txn := newTransaction("test", roachpb.Key("foo"), 1, roachpb.SERIALIZABLE, store.ctx.Clock)
	keys := []roachpb.Key{}
	for j := 0; j < 10; j++ {
		key := roachpb.Key(fmt.Sprintf("key%02d", j))
		keys = append(keys, key)
		args := putArgs(key, []byte(fmt.Sprintf("value%02d", j)), 1, store.StoreID())
		args.Txn = txn
		if _, err := client.SendWrapped(store, nil, &args); err != nil {
			t.Fatal(err)
		}
	}

	// Now, commit txn without resolving intents. If we hadn't disabled auto-gc
	// of Txn entries in this test, the Txn entry would be removed and later
	// attempts to resolve the intents would fail.
	etArgs := endTxnArgs(txn, true, 1, store.StoreID())
	if _, err := client.SendWrapped(store, nil, &etArgs); err != nil {
		t.Fatal(err)
	}

	intercept.Store(false) // allow async intent resolution

	// Scan the range repeatedly until we've verified count.
	sArgs := scanArgs(keys[0], keys[9].Next(), 1, store.StoreID())
	sArgs.ReadConsistency = roachpb.INCONSISTENT
	util.SucceedsWithin(t, time.Second, func() error {
		if reply, err := client.SendWrapped(store, nil, &sArgs); err != nil {
			return err
		} else if sReply := reply.(*roachpb.ScanResponse); len(sReply.Rows) != 10 {
			return util.Errorf("could not read rows as expected")
		}
		return nil
	})
}
Example 10: TestTxnMultipleCoord
// TestTxnMultipleCoord checks that a coordinator uses the Writing flag to
// enforce that only one coordinator can be used for transactional writes.
func TestTxnMultipleCoord(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	for i, tc := range []struct {
		args    roachpb.Request
		writing bool
		ok      bool
	}{
		{roachpb.NewGet(roachpb.Key("a")), true, true},
		{roachpb.NewGet(roachpb.Key("a")), false, true},
		{roachpb.NewPut(roachpb.Key("a"), roachpb.Value{}), false, true},
		{roachpb.NewPut(roachpb.Key("a"), roachpb.Value{}), true, false},
	} {
		{
			txn := newTxn(s.Clock, roachpb.Key("a"))
			txn.Writing = tc.writing
			tc.args.Header().Txn = txn
		}
		reply, err := client.SendWrapped(s.Sender, nil, tc.args)
		if err == nil != tc.ok {
			t.Errorf("%d: %T (writing=%t): success_expected=%t, but got: %v",
				i, tc.args, tc.writing, tc.ok, err)
		}
		if err != nil {
			continue
		}
		txn := reply.Header().Txn
		// The transaction should come back rw if it started rw or if we just
		// wrote.
		isWrite := roachpb.IsTransactionWrite(tc.args)
		if (tc.writing || isWrite) != txn.Writing {
			t.Errorf("%d: unexpected writing state: %s", i, txn)
		}
		if !isWrite {
			continue
		}
		// Abort for clean shutdown.
		if _, err := client.SendWrapped(s.Sender, nil, &roachpb.EndTransactionRequest{
			RequestHeader: roachpb.RequestHeader{
				Txn: txn,
			},
			Commit: false,
		}); err != nil {
			t.Fatal(err)
		}
	}
}
Example 11: TestReplicateAfterSplit
// TestReplicateAfterSplit verifies that a new replica whose start key
// is not KeyMin, replicating to a fresh store, can apply snapshots correctly.
func TestReplicateAfterSplit(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()

	rangeID := roachpb.RangeID(1)
	splitKey := roachpb.Key("m")
	key := roachpb.Key("z")

	store0 := mtc.stores[0]
	// Make the split.
	splitArgs := adminSplitArgs(roachpb.KeyMin, splitKey, rangeID, store0.StoreID())
	if _, err := client.SendWrapped(store0, nil, &splitArgs); err != nil {
		t.Fatal(err)
	}

	rangeID2 := store0.LookupReplica(key, nil).Desc().RangeID
	if rangeID2 == rangeID {
		t.Errorf("got same range id after split")
	}

	// Issue an increment for later check.
	incArgs := incrementArgs(key, 11, rangeID2, store0.StoreID())
	if _, err := client.SendWrapped(store0, nil, &incArgs); err != nil {
		t.Fatal(err)
	}

	// Now add the second replica.
	mtc.replicateRange(rangeID2, 0, 1)
	if mtc.stores[1].LookupReplica(key, nil).GetMaxBytes() == 0 {
		t.Error("Range MaxBytes is not set after snapshot applied")
	}

	// Once it catches up, the effects of increment commands can be seen.
	if err := util.IsTrueWithin(func() bool {
		getArgs := getArgs(key, rangeID2, mtc.stores[1].StoreID())
		// Reading on a non-leader replica should use an inconsistent read.
		getArgs.ReadConsistency = roachpb.INCONSISTENT
		reply, err := client.SendWrapped(mtc.stores[1], nil, &getArgs)
		if err != nil {
			return false
		}
		getResp := reply.(*roachpb.GetResponse)
		if log.V(1) {
			log.Infof("read value %d", mustGetInt(getResp.Value))
		}
		return mustGetInt(getResp.Value) == 11
	}, 1*time.Second); err != nil {
		t.Fatal(err)
	}
}
Example 12: TestTxnCoordSenderCleanupOnAborted
// TestTxnCoordSenderCleanupOnAborted verifies that if a txn receives a
// TransactionAbortedError, the coordinator cleans up the transaction.
func TestTxnCoordSenderCleanupOnAborted(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	// Create a transaction with an intent at "a".
	key := roachpb.Key("a")
	txn := newTxn(s.Clock, key)
	txn.Priority = 1
	put := createPutRequest(key, []byte("value"), txn)
	if reply, err := client.SendWrapped(s.Sender, nil, put); err != nil {
		t.Fatal(err)
	} else {
		txn = reply.Header().Txn
	}

	// Push the transaction to abort it.
	txn2 := newTxn(s.Clock, key)
	txn2.Priority = 2
	pushArgs := &roachpb.PushTxnRequest{
		RequestHeader: roachpb.RequestHeader{
			Key: txn.Key,
		},
		Now:       s.Clock.Now(),
		PusherTxn: *txn2,
		PusheeTxn: *txn,
		PushType:  roachpb.ABORT_TXN,
	}
	if _, err := client.SendWrapped(s.Sender, nil, pushArgs); err != nil {
		t.Fatal(err)
	}

	// Now end the transaction and verify we've cleaned up, even though
	// the end transaction failed.
	etArgs := &roachpb.EndTransactionRequest{
		RequestHeader: roachpb.RequestHeader{
			Txn: txn,
		},
		Commit: true,
	}
	_, err := client.SendWrapped(s.Sender, nil, etArgs)
	switch err.(type) {
	case *roachpb.TransactionAbortedError:
		// Expected.
	default:
		t.Fatalf("expected transaction aborted error; got %s", err)
	}
	verifyCleanup(key, s.Sender, s.Eng, t)
}
Example 13: TestSplitSnapshotRace_SnapshotWins
// TestSplitSnapshotRace_SnapshotWins exercises one outcome of the
// split/snapshot race: The right side of the split replicates first, so
// the target node sees a raft snapshot before it has processed the split,
// while it still has a conflicting range.
func TestSplitSnapshotRace_SnapshotWins(t *testing.T) {
	defer leaktest.AfterTest(t)
	t.Skip("TODO(bdarnell): https://github.com/cockroachdb/cockroach/issues/3121")
	mtc, leftKey, rightKey := setupSplitSnapshotRace(t)
	defer mtc.Stop()

	// Bring the right range up first.
	for i := 3; i <= 5; i++ {
		mtc.restartStore(i)
	}

	// Perform a write on the right range.
	incArgs := incrementArgs(rightKey, 20)
	if _, err := client.SendWrapped(mtc.distSender, nil, &incArgs); err != nil {
		t.Fatal(err)
	}

	// It immediately propagates between nodes 4 and 5, but node 3
	// remains at its old value. It can't accept the right-hand range
	// because it conflicts with its not-yet-split copy of the left-hand
	// range. This test is not completely deterministic: we want to make
	// sure that node 3 doesn't panic when it receives the snapshot, but
	// since it silently drops the message there is nothing we can wait
	// for. There is a high probability that the message will have been
	// received by the time that nodes 4 and 5 have processed their
	// update.
	mtc.waitForValues(rightKey, 3*time.Second, []int64{0, 0, 0, 2, 22, 22})

	// Wake up the left-hand range. This will allow the left-hand
	// range's split to complete and unblock the right-hand range.
	mtc.restartStore(1)
	mtc.restartStore(2)

	// Perform writes on both sides. This is not strictly necessary but
	// it helps wake up dormant ranges that would otherwise have to wait
	// for retry timeouts.
	incArgs = incrementArgs(leftKey, 10)
	if _, err := client.SendWrapped(mtc.distSender, nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	mtc.waitForValues(leftKey, 3*time.Second, []int64{0, 11, 11, 11, 0, 0})

	incArgs = incrementArgs(rightKey, 200)
	if _, err := client.SendWrapped(mtc.distSender, nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	mtc.waitForValues(rightKey, 3*time.Second, []int64{0, 0, 0, 222, 222, 222})
}
Example 14: TestStoreSend
// TestStoreSend verifies straightforward command execution
// of both a read-only and a read-write command.
func TestStoreSend(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, _, stopper := createTestStore(t)
	defer stopper.Stop()

	// Try a successful get request.
	gArgs := getArgs([]byte("a"), 1, store.StoreID())
	if _, err := client.SendWrapped(store, nil, &gArgs); err != nil {
		t.Fatal(err)
	}
	pArgs := putArgs([]byte("a"), []byte("aaa"), 1, store.StoreID())
	if _, err := client.SendWrapped(store, nil, &pArgs); err != nil {
		t.Fatal(err)
	}
}
Example 15: TestRangeLookupOptionOnReverseScan
// TestRangeLookupOptionOnReverseScan verifies that a lookup triggered by a
// ReverseScan request has useReverseScan specified.
func TestRangeLookupOptionOnReverseScan(t *testing.T) {
	defer leaktest.AfterTest(t)()
	g, s := makeTestGossip(t)
	defer s()

	var testFn rpcSendFn = func(_ SendOptions, _ ReplicaSlice,
		args roachpb.BatchRequest, _ *rpc.Context) (*roachpb.BatchResponse, error) {
		return args.CreateReply(), nil
	}
	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(k roachpb.RKey, considerIntents, useReverseScan bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
			if len(k) > 0 && !useReverseScan {
				t.Fatalf("expected useReverseScan to be set")
			}
			return []roachpb.RangeDescriptor{testRangeDescriptor}, nil
		}),
	}
	ds := NewDistSender(ctx, g)
	rScan := &roachpb.ReverseScanRequest{
		Span: roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("b")},
	}
	if _, err := client.SendWrapped(ds, nil, rScan); err != nil {
		t.Fatal(err)
	}
}