This article collects typical usage examples of the SendWrappedWith function from the Golang package github.com/cockroachdb/cockroach/client. If you are asking yourself what exactly the Golang SendWrappedWith function does, how to call it, or what real code using it looks like, the hand-picked examples below should help.
The following presents 15 code examples of the SendWrappedWith function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
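All of the examples share one calling pattern: build a single roachpb.Request, put the per-request options (RangeID, Timestamp, Txn, ReadConsistency) in a header, hand both to SendWrappedWith along with a sender, and type-assert the reply to the concrete response type. The sketch below distills that pattern; it reuses helper names from the examples (rg1, getArgs, mustGetInt, mtc) and is illustrative only. Note that the older examples spell the header type roachpb.BatchRequest_Header rather than roachpb.Header, and that the error return is a plain error in some CockroachDB versions and a *roachpb.Error in others.

// Illustrative sketch, not taken from the repository: an inconsistent read
// sent through SendWrappedWith, assuming the test helpers used in the
// examples below (rg1, getArgs, mustGetInt) and a multiTestContext mtc.
getArgs := getArgs([]byte("a"))
reply, err := client.SendWrappedWith(
    rg1(mtc.stores[0]), // the sender: a Store here; a Replica, DistSender, or TxnCoordSender elsewhere
    nil,                // context; the examples pass nil
    roachpb.Header{
        ReadConsistency: roachpb.INCONSISTENT, // read without requiring the leader lease
    },
    &getArgs,
)
if err != nil {
    t.Fatal(err)
}
t.Logf("read value %d", mustGetInt(reply.(*roachpb.GetResponse).Value))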
Example 1: TestRejectFutureCommand
// TestRejectFutureCommand verifies that leaders reject commands that
// would cause a large time jump.
func TestRejectFutureCommand(t *testing.T) {
    defer leaktest.AfterTest(t)()
    const maxOffset = 100 * time.Millisecond
    manual := hlc.NewManualClock(0)
    clock := hlc.NewClock(manual.UnixNano)
    clock.SetMaxOffset(maxOffset)
    mtc := multiTestContext{
        clock: clock,
    }
    mtc.Start(t, 1)
    defer mtc.Stop()
    // First do a write. The first write will advance the clock by MaxOffset
    // because of the read cache's low water mark.
    getArgs := putArgs([]byte("b"), []byte("b"))
    if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &getArgs); err != nil {
        t.Fatal(err)
    }
    if now := clock.Now(); now.WallTime != int64(maxOffset) {
        t.Fatalf("expected clock to advance to 100ms; got %s", now)
    }
    // The logical clock has advanced past the physical clock; increment
    // the "physical" clock to catch up.
    manual.Increment(int64(maxOffset))
    startTime := manual.UnixNano()
    // Commands with a future timestamp that is within the MaxOffset
    // bound will be accepted and will cause the clock to advance.
    for i := int64(0); i < 3; i++ {
        incArgs := incrementArgs([]byte("a"), 5)
        ts := roachpb.ZeroTimestamp.Add(startTime+((i+1)*30)*int64(time.Millisecond), 0)
        if _, err := client.SendWrappedWith(rg1(mtc.stores[0]), nil, roachpb.Header{Timestamp: ts}, &incArgs); err != nil {
            t.Fatal(err)
        }
    }
    if now := clock.Now(); now.WallTime != int64(190*time.Millisecond) {
        t.Fatalf("expected clock to advance to 190ms; got %s", now)
    }
    // Once the accumulated offset reaches MaxOffset, commands will be rejected.
    incArgs := incrementArgs([]byte("a"), 11)
    ts := roachpb.ZeroTimestamp.Add(int64((time.Duration(startTime)+maxOffset+1)*time.Millisecond), 0)
    if _, err := client.SendWrappedWith(rg1(mtc.stores[0]), nil, roachpb.Header{Timestamp: ts}, &incArgs); err == nil {
        t.Fatalf("expected clock offset error but got nil")
    }
    // The clock remained at 190ms and the final command was not executed.
    if now := clock.Now(); now.WallTime != int64(190*time.Millisecond) {
        t.Errorf("expected clock to advance to 190ms; got %s", now)
    }
    val, _, err := engine.MVCCGet(mtc.engines[0], roachpb.Key("a"), clock.Now(), true, nil)
    if err != nil {
        t.Fatal(err)
    }
    if v := mustGetInt(val); v != 15 {
        t.Errorf("expected 15, got %v", v)
    }
}
Example 2: TestMultiRangeScanReverseScanInconsistent
// TestMultiRangeScanReverseScanInconsistent verifies that a Scan/ReverseScan
// across ranges that doesn't require read consistency will set a timestamp
// using the clock local to the distributed sender.
func TestMultiRangeScanReverseScanInconsistent(t *testing.T) {
    defer leaktest.AfterTest(t)
    s, db := setupMultipleRanges(t, "b")
    defer s.Stop()
    // Write keys "a" and "b", the latter of which is the first key in the
    // second range.
    keys := []string{"a", "b"}
    ts := []time.Time{}
    for i, key := range keys {
        b := &client.Batch{}
        b.Put(key, "value")
        if err := db.Run(b); err != nil {
            t.Fatal(err)
        }
        ts = append(ts, b.Results[0].Rows[0].Timestamp())
        log.Infof("%d: %s", i, b.Results[0].Rows[0].Timestamp())
    }
    // Do an inconsistent Scan/ReverseScan from a new DistSender and verify
    // it does the read at its local clock and doesn't receive an
    // OpRequiresTxnError. We set the local clock to the timestamp of
    // the first key to verify it's used to read only key "a".
    manual := hlc.NewManualClock(ts[1].UnixNano() - 1)
    clock := hlc.NewClock(manual.UnixNano)
    ds := kv.NewDistSender(&kv.DistSenderContext{Clock: clock}, s.Gossip())
    // Scan.
    sa := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("c"), 0).(*roachpb.ScanRequest)
    reply, err := client.SendWrappedWith(ds, nil, roachpb.BatchRequest_Header{
        ReadConsistency: roachpb.INCONSISTENT,
    }, sa)
    if err != nil {
        t.Fatal(err)
    }
    sr := reply.(*roachpb.ScanResponse)
    if l := len(sr.Rows); l != 1 {
        t.Fatalf("expected 1 row; got %d", l)
    }
    if key := string(sr.Rows[0].Key); keys[0] != key {
        t.Errorf("expected key %q; got %q", keys[0], key)
    }
    // ReverseScan.
    rsa := roachpb.NewReverseScan(roachpb.Key("a"), roachpb.Key("c"), 0).(*roachpb.ReverseScanRequest)
    reply, err = client.SendWrappedWith(ds, nil, roachpb.BatchRequest_Header{
        ReadConsistency: roachpb.INCONSISTENT,
    }, rsa)
    if err != nil {
        t.Fatal(err)
    }
    rsr := reply.(*roachpb.ReverseScanResponse)
    if l := len(rsr.Rows); l != 1 {
        t.Fatalf("expected 1 row; got %d", l)
    }
    if key := string(rsr.Rows[0].Key); keys[0] != key {
        t.Errorf("expected key %q; got %q", keys[0], key)
    }
}
Example 3: TestReplicateAfterSplit
// TestReplicateAfterSplit verifies that a new replica whose start key
// is not KeyMin replicating to a fresh store can apply snapshots correctly.
func TestReplicateAfterSplit(t *testing.T) {
    defer leaktest.AfterTest(t)
    mtc := startMultiTestContext(t, 2)
    defer mtc.Stop()
    rangeID := roachpb.RangeID(1)
    splitKey := roachpb.Key("m")
    key := roachpb.Key("z")
    store0 := mtc.stores[0]
    // Make the split
    splitArgs := adminSplitArgs(roachpb.KeyMin, splitKey)
    if _, err := client.SendWrapped(rg1(store0), nil, &splitArgs); err != nil {
        t.Fatal(err)
    }
    rangeID2 := store0.LookupReplica(roachpb.RKey(key), nil).RangeID
    if rangeID2 == rangeID {
        t.Errorf("got same range id after split")
    }
    // Issue an increment for later check.
    incArgs := incrementArgs(key, 11)
    if _, err := client.SendWrappedWith(rg1(store0), nil, roachpb.Header{
        RangeID: rangeID2,
    }, &incArgs); err != nil {
        t.Fatal(err)
    }
    // Now add the second replica.
    mtc.replicateRange(rangeID2, 0, 1)
    if mtc.stores[1].LookupReplica(roachpb.RKey(key), nil).GetMaxBytes() == 0 {
        t.Error("Range MaxBytes is not set after snapshot applied")
    }
    // Once it catches up, the effects of increment commands can be seen.
    if err := util.IsTrueWithin(func() bool {
        getArgs := getArgs(key)
        // Reading on non-leader replica should use inconsistent read
        reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
            RangeID:         rangeID2,
            ReadConsistency: roachpb.INCONSISTENT,
        }, &getArgs)
        if err != nil {
            return false
        }
        getResp := reply.(*roachpb.GetResponse)
        if log.V(1) {
            log.Infof("read value %d", mustGetInt(getResp.Value))
        }
        return mustGetInt(getResp.Value) == 11
    }, replicaReadTimeout); err != nil {
        t.Fatal(err)
    }
}
Example 4: TestTxnMultipleCoord
// TestTxnMultipleCoord checks that a coordinator uses the Writing flag to
// enforce that only one coordinator can be used for transactional writes.
func TestTxnMultipleCoord(t *testing.T) {
    defer leaktest.AfterTest(t)()
    s, sender := createTestDB(t)
    defer s.Stop()
    testCases := []struct {
        args    roachpb.Request
        writing bool
        ok      bool
    }{
        {roachpb.NewGet(roachpb.Key("a")), true, false},
        {roachpb.NewGet(roachpb.Key("a")), false, true},
        {roachpb.NewPut(roachpb.Key("a"), roachpb.Value{}), false, false}, // transactional write before begin
        {roachpb.NewPut(roachpb.Key("a"), roachpb.Value{}), true, false},  // must have switched coordinators
    }
    for i, tc := range testCases {
        txn := roachpb.NewTransaction("test", roachpb.Key("a"), 1, roachpb.SERIALIZABLE,
            s.Clock.Now(), s.Clock.MaxOffset().Nanoseconds())
        txn.Writing = tc.writing
        reply, pErr := client.SendWrappedWith(sender, nil, roachpb.Header{
            Txn: txn,
        }, tc.args)
        if pErr == nil != tc.ok {
            t.Errorf("%d: %T (writing=%t): success_expected=%t, but got: %v",
                i, tc.args, tc.writing, tc.ok, pErr)
        }
        if pErr != nil {
            continue
        }
        txn = reply.Header().Txn
        // The transaction should come back rw if it started rw or if we just
        // wrote.
        isWrite := roachpb.IsTransactionWrite(tc.args)
        if (tc.writing || isWrite) != txn.Writing {
            t.Errorf("%d: unexpected writing state: %s", i, txn)
        }
        if !isWrite {
            continue
        }
        // Abort for clean shutdown.
        if _, pErr := client.SendWrappedWith(sender, nil, roachpb.Header{
            Txn: txn,
        }, &roachpb.EndTransactionRequest{
            Commit: false,
        }); pErr != nil {
            t.Fatal(pErr)
        }
    }
}
Example 5: TestTxnMultipleCoord
// TestTxnMultipleCoord checks that a coordinator uses the Writing flag to
// enforce that only one coordinator can be used for transactional writes.
func TestTxnMultipleCoord(t *testing.T) {
    defer leaktest.AfterTest(t)
    s := createTestDB(t)
    defer s.Stop()
    for i, tc := range []struct {
        args    roachpb.Request
        writing bool
        ok      bool
    }{
        {roachpb.NewGet(roachpb.Key("a")), true, true},
        {roachpb.NewGet(roachpb.Key("a")), false, true},
        {roachpb.NewPut(roachpb.Key("a"), roachpb.Value{}), false, true},
        {roachpb.NewPut(roachpb.Key("a"), roachpb.Value{}), true, false},
    } {
        txn := newTxn(s.Clock, roachpb.Key("a"))
        txn.Writing = tc.writing
        reply, err := client.SendWrappedWith(s.Sender, nil, roachpb.BatchRequest_Header{
            Txn: txn,
        }, tc.args)
        if err == nil != tc.ok {
            t.Errorf("%d: %T (writing=%t): success_expected=%t, but got: %v",
                i, tc.args, tc.writing, tc.ok, err)
        }
        if err != nil {
            continue
        }
        txn = reply.Header().Txn
        // The transaction should come back rw if it started rw or if we just
        // wrote.
        isWrite := roachpb.IsTransactionWrite(tc.args)
        if (tc.writing || isWrite) != txn.Writing {
            t.Errorf("%d: unexpected writing state: %s", i, txn)
        }
        if !isWrite {
            continue
        }
        // Abort for clean shutdown.
        if _, err := client.SendWrappedWith(s.Sender, nil, roachpb.BatchRequest_Header{
            Txn: txn,
        }, &roachpb.EndTransactionRequest{
            Commit: false,
        }); err != nil {
            t.Fatal(err)
        }
    }
}
Example 6: TestReplicateAfterSplit
// TestReplicateAfterSplit verifies that a new replica whose start key
// is not KeyMin replicating to a fresh store can apply snapshots correctly.
func TestReplicateAfterSplit(t *testing.T) {
    defer leaktest.AfterTest(t)
    mtc := startMultiTestContext(t, 2)
    defer mtc.Stop()
    rangeID := roachpb.RangeID(1)
    splitKey := roachpb.Key("m")
    key := roachpb.Key("z")
    store0 := mtc.stores[0]
    // Make the split
    splitArgs := adminSplitArgs(roachpb.KeyMin, splitKey)
    if _, err := client.SendWrapped(rg1(store0), nil, &splitArgs); err != nil {
        t.Fatal(err)
    }
    rangeID2 := store0.LookupReplica(roachpb.RKey(key), nil).RangeID
    if rangeID2 == rangeID {
        t.Errorf("got same range id after split")
    }
    // Issue an increment for later check.
    incArgs := incrementArgs(key, 11)
    if _, err := client.SendWrappedWith(rg1(store0), nil, roachpb.Header{
        RangeID: rangeID2,
    }, &incArgs); err != nil {
        t.Fatal(err)
    }
    // Now add the second replica.
    mtc.replicateRange(rangeID2, 1)
    if mtc.stores[1].LookupReplica(roachpb.RKey(key), nil).GetMaxBytes() == 0 {
        t.Error("Range MaxBytes is not set after snapshot applied")
    }
    // Once it catches up, the effects of increment commands can be seen.
    util.SucceedsWithin(t, replicaReadTimeout, func() error {
        getArgs := getArgs(key)
        // Reading on non-leader replica should use inconsistent read
        if reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
            RangeID:         rangeID2,
            ReadConsistency: roachpb.INCONSISTENT,
        }, &getArgs); err != nil {
            return util.Errorf("failed to read data: %s", err)
        } else if e, v := int64(11), mustGetInt(reply.(*roachpb.GetResponse).Value); v != e {
            return util.Errorf("failed to read correct data: expected %d, got %d", e, v)
        }
        return nil
    })
}
Example 7: TestTxnCoordSenderCleanupOnAborted
// TestTxnCoordSenderCleanupOnAborted verifies that if a txn receives a
// TransactionAbortedError, the coordinator cleans up the transaction.
func TestTxnCoordSenderCleanupOnAborted(t *testing.T) {
    defer leaktest.AfterTest(t)
    s := createTestDB(t)
    defer s.Stop()
    // Create a transaction with intent at "a".
    key := roachpb.Key("a")
    txn := newTxn(s.Clock, key)
    txn.Priority = 1
    put, h := createPutRequest(key, []byte("value"), txn)
    if reply, err := client.SendWrappedWith(s.Sender, nil, h, put); err != nil {
        t.Fatal(err)
    } else {
        txn = reply.Header().Txn
    }
    // Push the transaction to abort it.
    txn2 := newTxn(s.Clock, key)
    txn2.Priority = 2
    pushArgs := &roachpb.PushTxnRequest{
        RequestHeader: roachpb.RequestHeader{
            Key: txn.Key,
        },
        Now:       s.Clock.Now(),
        PusherTxn: *txn2,
        PusheeTxn: *txn,
        PushType:  roachpb.ABORT_TXN,
    }
    if _, err := client.SendWrapped(s.Sender, nil, pushArgs); err != nil {
        t.Fatal(err)
    }
    // Now end the transaction and verify we've cleaned up, even though
    // the EndTransaction failed.
    etArgs := &roachpb.EndTransactionRequest{
        Commit: true,
    }
    _, err := client.SendWrappedWith(s.Sender, nil, roachpb.BatchRequest_Header{
        Txn: txn,
    }, etArgs)
    switch err.(type) {
    case *roachpb.TransactionAbortedError:
        // Expected
    default:
        t.Fatalf("expected transaction aborted error; got %s", err)
    }
    verifyCleanup(key, s.Sender, s.Eng, t)
}
Example 8: TestTxnCoordSenderBeginTransactionMinPriority
// TestTxnCoordSenderBeginTransactionMinPriority verifies that when starting
// a new transaction, a non-zero priority is treated as a minimum value.
func TestTxnCoordSenderBeginTransactionMinPriority(t *testing.T) {
    defer leaktest.AfterTest(t)
    s := createTestDB(t)
    defer s.Stop()
    defer teardownHeartbeats(s.Sender)
    reply, err := client.SendWrappedWith(s.Sender, nil, roachpb.BatchRequest_Header{
        UserPriority: proto.Int32(-10), // negative user priority is translated into positive priority
        Txn: &roachpb.Transaction{
            Name:      "test txn",
            Isolation: roachpb.SNAPSHOT,
            Priority:  11,
        },
    }, &roachpb.PutRequest{
        RequestHeader: roachpb.RequestHeader{
            Key: roachpb.Key("key"),
        },
    })
    if err != nil {
        t.Fatal(err)
    }
    if prio := reply.(*roachpb.PutResponse).Txn.Priority; prio != 11 {
        t.Errorf("expected txn priority 11; got %d", prio)
    }
}
Example 9: TestTxnCoordSenderBeginTransaction
// TestTxnCoordSenderBeginTransaction verifies that a command sent with a
// not-nil Txn with empty ID gets a new transaction initialized.
func TestTxnCoordSenderBeginTransaction(t *testing.T) {
    defer leaktest.AfterTest(t)
    s := createTestDB(t)
    defer s.Stop()
    defer teardownHeartbeats(s.Sender)
    key := roachpb.Key("key")
    reply, err := client.SendWrappedWith(s.Sender, nil, roachpb.BatchRequest_Header{
        UserPriority: proto.Int32(-10), // negative user priority is translated into positive priority
        Txn: &roachpb.Transaction{
            Name:      "test txn",
            Isolation: roachpb.SNAPSHOT,
        },
    }, &roachpb.PutRequest{
        RequestHeader: roachpb.RequestHeader{
            Key: key,
        },
    })
    if err != nil {
        t.Fatal(err)
    }
    pr := reply.(*roachpb.PutResponse)
    if pr.Txn.Name != "test txn" {
        t.Errorf("expected txn name to be %q; got %q", "test txn", pr.Txn.Name)
    }
    if pr.Txn.Priority != 10 {
        t.Errorf("expected txn priority 10; got %d", pr.Txn.Priority)
    }
    if !bytes.Equal(pr.Txn.Key, key) {
        t.Errorf("expected txn Key to match %q != %q", key, pr.Txn.Key)
    }
    if pr.Txn.Isolation != roachpb.SNAPSHOT {
        t.Errorf("expected txn isolation to be SNAPSHOT; got %s", pr.Txn.Isolation)
    }
}
Example 10: process
// process synchronously invokes admin split for each proposed split key.
func (sq *splitQueue) process(now roachpb.Timestamp, rng *Replica,
    sysCfg config.SystemConfig) error {
    ctx := rng.context(context.TODO())
    // First handle case of splitting due to zone config maps.
    desc := rng.Desc()
    splitKeys := sysCfg.ComputeSplitKeys(desc.StartKey, desc.EndKey)
    if len(splitKeys) > 0 {
        log.Infof("splitting %s at keys %v", rng, splitKeys)
        for _, splitKey := range splitKeys {
            if err := sq.db.AdminSplit(splitKey.AsRawKey()); err != nil {
                return util.Errorf("unable to split %s at key %q: %s", rng, splitKey, err)
            }
        }
        return nil
    }
    // Next handle case of splitting due to size.
    zone, err := sysCfg.GetZoneConfigForKey(desc.StartKey)
    if err != nil {
        return err
    }
    // FIXME: why is this implementation not the same as the one above?
    if float64(rng.stats.GetSize())/float64(zone.RangeMaxBytes) > 1 {
        log.Infof("splitting %s size=%d max=%d", rng, rng.stats.GetSize(), zone.RangeMaxBytes)
        if _, pErr := client.SendWrappedWith(rng, ctx, roachpb.Header{
            Timestamp: now,
        }, &roachpb.AdminSplitRequest{
            Span: roachpb.Span{Key: desc.StartKey.AsRawKey()},
        }); pErr != nil {
            return pErr.GoError()
        }
    }
    return nil
}
Example 11: TestLeaderRemoveSelf
// TestLeaderRemoveSelf verifies that a leader can remove itself
// without panicking and future access to the range returns a
// RangeNotFoundError (not RaftGroupDeletedError, and even before
// the ReplicaGCQueue has run).
func TestLeaderRemoveSelf(t *testing.T) {
    defer leaktest.AfterTest(t)
    mtc := startMultiTestContext(t, 2)
    defer mtc.Stop()
    // Disable the replica GC queue. This verifies that the replica is
    // considered removed even before the GC queue has run, and also
    // helps avoid a deadlock at shutdown.
    mtc.stores[0].DisableReplicaGCQueue(true)
    raftID := roachpb.RangeID(1)
    mtc.replicateRange(raftID, 1)
    // Remove the replica from the first store.
    mtc.unreplicateRange(raftID, 0)
    getArgs := getArgs([]byte("a"))
    // Force the read command to request a new lease.
    clock := mtc.clocks[0]
    header := roachpb.Header{}
    header.Timestamp = clock.Update(clock.Now().Add(int64(storage.DefaultLeaderLeaseDuration), 0))
    // Expect to get a RangeNotFoundError.
    _, pErr := client.SendWrappedWith(rg1(mtc.stores[0]), nil, header, &getArgs)
    if _, ok := pErr.GoError().(*roachpb.RangeNotFoundError); !ok {
        t.Fatalf("expected a RangeNotFoundError, got %v", pErr)
    }
}
Example 12: TestTxnCoordSenderGC
// TestTxnCoordSenderGC verifies that the coordinator cleans up extant
// transactions after the lastUpdateNanos exceeds the timeout.
func TestTxnCoordSenderGC(t *testing.T) {
    defer leaktest.AfterTest(t)
    s := createTestDB(t)
    defer s.Stop()
    // Set heartbeat interval to 1ms for testing.
    s.Sender.heartbeatInterval = 1 * time.Millisecond
    txn := newTxn(s.Clock, roachpb.Key("a"))
    put, h := createPutRequest(roachpb.Key("a"), []byte("value"), txn)
    if _, err := client.SendWrappedWith(s.Sender, nil, h, put); err != nil {
        t.Fatal(err)
    }
    // Now, advance clock past the default client timeout.
    // Locking the TxnCoordSender to prevent a data race.
    s.Sender.Lock()
    s.Manual.Set(defaultClientTimeout.Nanoseconds() + 1)
    s.Sender.Unlock()
    if err := util.IsTrueWithin(func() bool {
        // Locking the TxnCoordSender to prevent a data race.
        s.Sender.Lock()
        _, ok := s.Sender.txns[string(txn.ID)]
        s.Sender.Unlock()
        return !ok
    }, 50*time.Millisecond); err != nil {
        t.Error("expected garbage collection")
    }
}
Example 13: fillRange
// fillRange writes keys with the given prefix and associated values
// until bytes bytes have been written or the given range has split.
func fillRange(store *storage.Store, rangeID roachpb.RangeID, prefix roachpb.Key, bytes int64, t *testing.T) {
    src := rand.New(rand.NewSource(0))
    for {
        var ms engine.MVCCStats
        if err := engine.MVCCGetRangeStats(store.Engine(), rangeID, &ms); err != nil {
            t.Fatal(err)
        }
        keyBytes, valBytes := ms.KeyBytes, ms.ValBytes
        if keyBytes+valBytes >= bytes {
            return
        }
        key := append(append([]byte(nil), prefix...), randutil.RandBytes(src, 100)...)
        key = keys.MakeNonColumnKey(key)
        val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
        pArgs := putArgs(key, val)
        _, err := client.SendWrappedWith(store, nil, roachpb.Header{
            RangeID: rangeID,
        }, &pArgs)
        // When the split occurs in the background, our writes may start failing.
        // We know we can stop writing when this happens.
        if _, ok := err.(*roachpb.RangeKeyMismatchError); ok {
            return
        } else if err != nil {
            t.Fatal(err)
        }
    }
}
Example 14: TestReplicateRange
// TestReplicateRange verifies basic replication functionality by creating two stores
// and a range, replicating the range to the second store, and reading its data there.
func TestReplicateRange(t *testing.T) {
    defer leaktest.AfterTest(t)
    mtc := startMultiTestContext(t, 2)
    defer mtc.Stop()
    // Issue a command on the first node before replicating.
    incArgs := incrementArgs([]byte("a"), 5)
    if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
        t.Fatal(err)
    }
    rng, err := mtc.stores[0].GetReplica(1)
    if err != nil {
        t.Fatal(err)
    }
    if err := rng.ChangeReplicas(roachpb.ADD_REPLICA,
        roachpb.ReplicaDescriptor{
            NodeID:  mtc.stores[1].Ident.NodeID,
            StoreID: mtc.stores[1].Ident.StoreID,
        }, rng.Desc()); err != nil {
        t.Fatal(err)
    }
    // Verify no intent remains on range descriptor key.
    key := keys.RangeDescriptorKey(rng.Desc().StartKey)
    desc := roachpb.RangeDescriptor{}
    if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &desc); !ok || err != nil {
        t.Fatalf("fetching range descriptor yielded %t, %s", ok, err)
    }
    // Verify that in time, no intents remain on meta addressing
    // keys, and that range descriptor on the meta records is correct.
    util.SucceedsWithin(t, 1*time.Second, func() error {
        meta2 := keys.Addr(keys.RangeMetaKey(roachpb.RKeyMax))
        meta1 := keys.Addr(keys.RangeMetaKey(meta2))
        for _, key := range []roachpb.RKey{meta2, meta1} {
            metaDesc := roachpb.RangeDescriptor{}
            if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key.AsRawKey(), mtc.stores[0].Clock().Now(), true, nil, &metaDesc); !ok || err != nil {
                return util.Errorf("failed to resolve %s", key.AsRawKey())
            }
            if !reflect.DeepEqual(metaDesc, desc) {
                return util.Errorf("descs not equal: %+v != %+v", metaDesc, desc)
            }
        }
        return nil
    })
    // Verify that the same data is available on the replica.
    util.SucceedsWithin(t, replicaReadTimeout, func() error {
        getArgs := getArgs([]byte("a"))
        if reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
            ReadConsistency: roachpb.INCONSISTENT,
        }, &getArgs); err != nil {
            return util.Errorf("failed to read data: %s", err)
        } else if e, v := int64(5), mustGetInt(reply.(*roachpb.GetResponse).Value); v != e {
            return util.Errorf("failed to read correct data: expected %d, got %d", e, v)
        }
        return nil
    })
}
Example 15: TestTxnCoordSenderKeyRanges
// TestTxnCoordSenderKeyRanges verifies that multiple requests to the same or
// overlapping key ranges cause the coordinator to keep track of only
// the minimum number of ranges.
func TestTxnCoordSenderKeyRanges(t *testing.T) {
    defer leaktest.AfterTest(t)
    ranges := []struct {
        start, end roachpb.Key
    }{
        {roachpb.Key("a"), roachpb.Key(nil)},
        {roachpb.Key("a"), roachpb.Key(nil)},
        {roachpb.Key("aa"), roachpb.Key(nil)},
        {roachpb.Key("b"), roachpb.Key(nil)},
        {roachpb.Key("aa"), roachpb.Key("c")},
        {roachpb.Key("b"), roachpb.Key("c")},
    }
    s := createTestDB(t)
    defer s.Stop()
    defer teardownHeartbeats(s.Sender)
    txn := newTxn(s.Clock, roachpb.Key("a"))
    for _, rng := range ranges {
        if rng.end != nil {
            delRangeReq, h := createDeleteRangeRequest(rng.start, rng.end, txn)
            if _, err := client.SendWrappedWith(s.Sender, nil, h, delRangeReq); err != nil {
                t.Fatal(err)
            }
        } else {
            putReq, h := createPutRequest(rng.start, []byte("value"), txn)
            if _, err := client.SendWrappedWith(s.Sender, nil, h, putReq); err != nil {
                t.Fatal(err)
            }
        }
        txn.Writing = true // required for all but first req
    }
    // Verify that the transaction metadata contains only two entries
    // in its "keys" interval cache: "a" and the range "aa"-"c".
    txnMeta, ok := s.Sender.txns[string(txn.ID)]
    if !ok {
        t.Fatalf("expected a transaction to be created on coordinator")
    }
    if txnMeta.keys.Len() != 2 {
        t.Errorf("expected 2 entries in keys interval cache; got %v", txnMeta.keys)
    }
}