This page collects typical usage examples of the Golang SendWrappedWith function from github.com/cockroachdb/cockroach/internal/client. If you are unsure what SendWrappedWith does or how to call it, the curated examples below may help.
It presents 15 code examples of SendWrappedWith, ordered by popularity.
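Before the individual examples, here is a minimal sketch of the calling pattern they all share. The sketch is illustrative only: the helper getWithHeader and its parameter names are hypothetical, and it assumes the SendWrappedWith signature from this era of the repository (sender, optional context, header, request), returning the unwrapped response and a *roachpb.Error.

package example

import (
	"github.com/cockroachdb/cockroach/internal/client"
	"github.com/cockroachdb/cockroach/roachpb"
)

// getWithHeader is a hypothetical helper. SendWrappedWith wraps a single
// request into a batch, stamps it with the supplied header (range routing,
// timestamp, transaction, and so on), sends it through the given sender,
// and unwraps the single response. The examples below pass nil for the
// context argument.
func getWithHeader(sender client.Sender, rangeID roachpb.RangeID, key roachpb.Key) (roachpb.Response, *roachpb.Error) {
	req := &roachpb.GetRequest{Span: roachpb.Span{Key: key}}
	return client.SendWrappedWith(sender, nil, roachpb.Header{RangeID: rangeID}, req)
}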
Example 1: TestRejectFutureCommand
// TestRejectFutureCommand verifies that lease holders reject commands that
// would cause a large time jump.
func TestRejectFutureCommand(t *testing.T) {
	defer leaktest.AfterTest(t)()

	const maxOffset = 100 * time.Millisecond
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(maxOffset)
	mtc := multiTestContext{
		clock: clock,
	}
	mtc.Start(t, 1)
	defer mtc.Stop()

	// First do a write. The first write will advance the clock by MaxOffset
	// because of the read cache's low water mark.
	pArgs := putArgs([]byte("b"), []byte("b"))
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &pArgs); err != nil {
		t.Fatal(err)
	}
	if now := clock.Now(); now.WallTime != int64(maxOffset) {
		t.Fatalf("expected clock to advance to 100ms; got %s", now)
	}
	// The logical clock has advanced past the physical clock; increment
	// the "physical" clock to catch up.
	manual.Increment(int64(maxOffset))
	startTime := manual.UnixNano()

	// Commands with a future timestamp that is within the MaxOffset
	// bound will be accepted and will cause the clock to advance.
	for i := int64(0); i < 3; i++ {
		incArgs := incrementArgs([]byte("a"), 5)
		ts := hlc.ZeroTimestamp.Add(startTime+((i+1)*30)*int64(time.Millisecond), 0)
		if _, err := client.SendWrappedWith(rg1(mtc.stores[0]), nil, roachpb.Header{Timestamp: ts}, &incArgs); err != nil {
			t.Fatal(err)
		}
	}
	if now := clock.Now(); now.WallTime != int64(190*time.Millisecond) {
		t.Fatalf("expected clock to advance to 190ms; got %s", now)
	}

	// Once the accumulated offset reaches MaxOffset, commands will be rejected.
	incArgs := incrementArgs([]byte("a"), 11)
	ts := hlc.ZeroTimestamp.Add(startTime+int64(maxOffset+1*time.Millisecond), 0)
	if _, err := client.SendWrappedWith(rg1(mtc.stores[0]), nil, roachpb.Header{Timestamp: ts}, &incArgs); err == nil {
		t.Fatalf("expected clock offset error but got nil")
	}

	// The clock remained at 190ms and the final command was not executed.
	if now := clock.Now(); now.WallTime != int64(190*time.Millisecond) {
		t.Errorf("expected clock to advance to 190ms; got %s", now)
	}
	val, _, err := engine.MVCCGet(context.Background(), mtc.engines[0], roachpb.Key("a"), clock.Now(), true, nil)
	if err != nil {
		t.Fatal(err)
	}
	if v := mustGetInt(val); v != 15 {
		t.Errorf("expected 15, got %v", v)
	}
}
Example 2: TestTxnMultipleCoord
// TestTxnMultipleCoord checks that a coordinator uses the Writing flag to
// enforce that only one coordinator can be used for transactional writes.
func TestTxnMultipleCoord(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sender := createTestDB(t)
	defer s.Stop()

	testCases := []struct {
		args    roachpb.Request
		writing bool
		ok      bool
	}{
		{roachpb.NewGet(roachpb.Key("a")), true, false},
		{roachpb.NewGet(roachpb.Key("a")), false, true},
		{roachpb.NewPut(roachpb.Key("a"), roachpb.Value{}), false, false}, // transactional write before begin
		{roachpb.NewPut(roachpb.Key("a"), roachpb.Value{}), true, false},  // must have switched coordinators
	}
	for i, tc := range testCases {
		txn := roachpb.NewTransaction("test", roachpb.Key("a"), 1, enginepb.SERIALIZABLE,
			s.Clock.Now(), s.Clock.MaxOffset().Nanoseconds())
		txn.Writing = tc.writing
		reply, pErr := client.SendWrappedWith(sender, nil, roachpb.Header{
			Txn: txn,
		}, tc.args)
		if pErr == nil != tc.ok {
			t.Errorf("%d: %T (writing=%t): success_expected=%t, but got: %v",
				i, tc.args, tc.writing, tc.ok, pErr)
		}
		if pErr != nil {
			continue
		}
		txn = reply.Header().Txn
		// The transaction should come back read-write if it started that way
		// or if we just wrote.
		isWrite := roachpb.IsTransactionWrite(tc.args)
		if (tc.writing || isWrite) != txn.Writing {
			t.Errorf("%d: unexpected writing state: %s", i, txn)
		}
		if !isWrite {
			continue
		}
		// Abort for clean shutdown.
		if _, pErr := client.SendWrappedWith(sender, nil, roachpb.Header{
			Txn: txn,
		}, &roachpb.EndTransactionRequest{
			Commit: false,
		}); pErr != nil {
			t.Fatal(pErr)
		}
	}
}
Example 3: fillRange
// fillRange writes keys with the given prefix and associated values
// until at least `bytes` bytes have been written or the given range has split.
func fillRange(store *storage.Store, rangeID roachpb.RangeID, prefix roachpb.Key, bytes int64, t *testing.T) {
	src := rand.New(rand.NewSource(0))
	for {
		var ms enginepb.MVCCStats
		if err := engine.MVCCGetRangeStats(context.Background(), store.Engine(), rangeID, &ms); err != nil {
			t.Fatal(err)
		}
		keyBytes, valBytes := ms.KeyBytes, ms.ValBytes
		if keyBytes+valBytes >= bytes {
			return
		}
		key := append(append([]byte(nil), prefix...), randutil.RandBytes(src, 100)...)
		key = keys.MakeRowSentinelKey(key)
		val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
		pArgs := putArgs(key, val)
		_, pErr := client.SendWrappedWith(store, nil, roachpb.Header{
			RangeID: rangeID,
		}, &pArgs)
		// When the split occurs in the background, our writes may start failing.
		// We know we can stop writing when this happens.
		if _, ok := pErr.GetDetail().(*roachpb.RangeKeyMismatchError); ok {
			return
		} else if pErr != nil {
			t.Fatal(pErr)
		}
	}
}
Example 4: TestComputeStatsForKeySpan
func TestComputeStatsForKeySpan(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()

	// Create a number of ranges using splits.
	splitKeys := []string{"a", "c", "e", "g", "i"}
	for _, k := range splitKeys {
		key := []byte(k)
		repl := mtc.stores[0].LookupReplica(key, roachpb.RKeyMin)
		args := adminSplitArgs(key, key)
		header := roachpb.Header{
			RangeID: repl.RangeID,
		}
		if _, err := client.SendWrappedWith(mtc.stores[0], nil, header, &args); err != nil {
			t.Fatal(err)
		}
	}

	// Wait for splits to finish.
	util.SucceedsSoon(t, func() error {
		repl := mtc.stores[0].LookupReplica(roachpb.RKey("z"), nil)
		if actualRSpan := repl.Desc().RSpan(); !actualRSpan.Key.Equal(roachpb.RKey("i")) {
			return errors.Errorf("expected range %s to begin at key 'i'", repl)
		}
		return nil
	})

	// Create some keys across the ranges.
	incKeys := []string{"b", "bb", "bbb", "d", "dd", "h"}
	for _, k := range incKeys {
		if _, err := mtc.dbs[0].Inc([]byte(k), 5); err != nil {
			t.Fatal(err)
		}
	}

	// Verify stats across different spans.
	for _, tcase := range []struct {
		startKey       string
		endKey         string
		expectedRanges int
		expectedKeys   int64
	}{
		{"a", "i", 4, 6},
		{"a", "c", 1, 3},
		{"b", "e", 2, 5},
		{"e", "i", 2, 1},
	} {
		start, end := tcase.startKey, tcase.endKey
		stats, count := mtc.stores[0].ComputeStatsForKeySpan(
			roachpb.RKey(start), roachpb.RKey(end))
		if a, e := count, tcase.expectedRanges; a != e {
			t.Errorf("Expected %d ranges in span [%s - %s], found %d", e, start, end, a)
		}
		if a, e := stats.LiveCount, tcase.expectedKeys; a != e {
			t.Errorf("Expected %d keys in span [%s - %s], found %d", e, start, end, a)
		}
	}
}
Example 5: TestRangeCommandClockUpdate
// TestRangeCommandClockUpdate verifies that followers update their
// clocks when executing a command, even if the lease holder's clock is far
// in the future.
func TestRangeCommandClockUpdate(t *testing.T) {
	defer leaktest.AfterTest(t)()

	const numNodes = 3
	var manuals []*hlc.ManualClock
	var clocks []*hlc.Clock
	for i := 0; i < numNodes; i++ {
		manuals = append(manuals, hlc.NewManualClock(1))
		clocks = append(clocks, hlc.NewClock(manuals[i].UnixNano))
		clocks[i].SetMaxOffset(100 * time.Millisecond)
	}
	mtc := multiTestContext{
		clocks: clocks,
	}
	mtc.Start(t, numNodes)
	defer mtc.Stop()
	mtc.replicateRange(1, 1, 2)

	// Advance the lease holder's clock ahead of the followers (by more than
	// MaxOffset but less than the range lease) and execute a command.
	manuals[0].Increment(int64(500 * time.Millisecond))
	incArgs := incrementArgs([]byte("a"), 5)
	ts := clocks[0].Now()
	if _, err := client.SendWrappedWith(rg1(mtc.stores[0]), nil, roachpb.Header{Timestamp: ts}, &incArgs); err != nil {
		t.Fatal(err)
	}

	// Wait for that command to execute on all the followers.
	util.SucceedsSoon(t, func() error {
		values := []int64{}
		for _, eng := range mtc.engines {
			val, _, err := engine.MVCCGet(context.Background(), eng, roachpb.Key("a"), clocks[0].Now(), true, nil)
			if err != nil {
				return err
			}
			values = append(values, mustGetInt(val))
		}
		if !reflect.DeepEqual(values, []int64{5, 5, 5}) {
			return errors.Errorf("expected (5, 5, 5), got %v", values)
		}
		return nil
	})

	// Verify that all the followers have accepted the clock update from
	// node 0 even though it comes from outside the usual max offset.
	now := clocks[0].Now()
	for i, clock := range clocks {
		// Only compare the WallTimes: it's normal for clock 0 to be a few logical ticks ahead.
		if clock.Now().WallTime < now.WallTime {
			t.Errorf("clock %d is behind clock 0: %s vs %s", i, clock.Now(), now)
		}
	}
}
Example 6: writeRandomTimeSeriesDataToRange
func writeRandomTimeSeriesDataToRange(
	t testing.TB,
	store *storage.Store,
	rangeID roachpb.RangeID,
	keyPrefix []byte,
) (midpoint []byte) {
	src := rand.New(rand.NewSource(0))
	r := ts.Resolution10s
	for i := 0; i < 20; i++ {
		var data []tspb.TimeSeriesData
		for j := int64(0); j <= src.Int63n(5); j++ {
			d := tspb.TimeSeriesData{
				Name:   "test.random.metric",
				Source: "cpu01",
			}
			for k := int64(0); k <= src.Int63n(10); k++ {
				d.Datapoints = append(d.Datapoints, tspb.TimeSeriesDatapoint{
					TimestampNanos: src.Int63n(200) * r.KeyDuration(),
					Value:          src.Float64(),
				})
			}
			data = append(data, d)
		}
		for _, d := range data {
			idatas, err := d.ToInternal(r.KeyDuration(), r.SampleDuration())
			if err != nil {
				t.Fatal(err)
			}
			for _, idata := range idatas {
				var value roachpb.Value
				if err := value.SetProto(&idata); err != nil {
					t.Fatal(err)
				}
				mArgs := roachpb.MergeRequest{
					Span: roachpb.Span{
						Key: encoding.EncodeVarintAscending(keyPrefix, idata.StartTimestampNanos),
					},
					Value: value,
				}
				if _, pErr := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
					RangeID: rangeID,
				}, &mArgs); pErr != nil {
					t.Fatal(pErr)
				}
			}
		}
	}
	// Return approximate midway point (100 is midway between random timestamps in range [0,200)).
	midKey := append([]byte(nil), keyPrefix...)
	midKey = encoding.EncodeVarintAscending(midKey, 100*r.KeyDuration())
	return keys.MakeRowSentinelKey(midKey)
}
Example 7: fillTestRange
func fillTestRange(t testing.TB, rep *Replica, size int) {
	src := rand.New(rand.NewSource(0))
	for i := 0; i < size/(keySize+valSize); i++ {
		key := keys.MakeRowSentinelKey(randutil.RandBytes(src, keySize))
		val := randutil.RandBytes(src, valSize)
		pArgs := putArgs(key, val)
		if _, pErr := client.SendWrappedWith(rep, nil, roachpb.Header{
			RangeID: rangeID,
		}, &pArgs); pErr != nil {
			t.Fatal(pErr)
		}
	}
}
Example 8: getTxn
// getTxn sends a heartbeat request for the given transaction's record and
// returns the updated transaction info.
func getTxn(coord *TxnCoordSender, txn *roachpb.Transaction) (*roachpb.Transaction, *roachpb.Error) {
	hb := &roachpb.HeartbeatTxnRequest{
		Span: roachpb.Span{
			Key: txn.Key,
		},
	}
	reply, pErr := client.SendWrappedWith(coord, nil, roachpb.Header{
		Txn: txn,
	}, hb)
	if pErr != nil {
		return nil, pErr
	}
	return reply.(*roachpb.HeartbeatTxnResponse).Txn, nil
}
Example 9: writeRandomDataToRange
func writeRandomDataToRange(t testing.TB, store *storage.Store, rangeID roachpb.RangeID, keyPrefix []byte) {
	src := rand.New(rand.NewSource(0))
	for i := 0; i < 100; i++ {
		key := append([]byte(nil), keyPrefix...)
		key = append(key, randutil.RandBytes(src, int(src.Int31n(1<<7)))...)
		key = keys.MakeRowSentinelKey(key)
		val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
		pArgs := putArgs(key, val)
		if _, pErr := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
			RangeID: rangeID,
		}, &pArgs); pErr != nil {
			t.Fatal(pErr)
		}
	}
}
Example 10: FindRangeLeaseHolder
// FindRangeLeaseHolder returns the current lease holder for the given range.
// If there is no lease at the time of the call, a replica gets one as a
// side-effect of calling this; if hint is not nil, that replica will be the
// one.
//
// One of the Stores in the cluster is used as a Sender to send a dummy read
// command, which will either result in success (if a replica on that Node has
// the lease), in a NotLeaseHolderError pointing to the current lease holder
// (if there is an active lease), or in the replica on that store acquiring
// the lease (if there isn't an active lease).
// If an active lease existed for the range, it's extended as a side-effect.
func (tc *TestCluster) FindRangeLeaseHolder(
	rangeDesc *roachpb.RangeDescriptor,
	hint *ReplicationTarget,
) (ReplicationTarget, error) {
	var hintReplicaDesc roachpb.ReplicaDescriptor
	if hint != nil {
		var ok bool
		if hintReplicaDesc, ok = rangeDesc.GetReplicaDescriptor(hint.StoreID); !ok {
			return ReplicationTarget{}, errors.Errorf(
				"bad hint; store doesn't have a replica of the range")
		}
	} else {
		hint = &ReplicationTarget{
			NodeID:  rangeDesc.Replicas[0].NodeID,
			StoreID: rangeDesc.Replicas[0].StoreID}
		hintReplicaDesc = rangeDesc.Replicas[0]
	}

	// TODO(andrei): Using a dummy GetRequest for the purpose of figuring out
	// the lease holder is a hack. Instead, we should have a dedicated admin
	// command.
	getReq := roachpb.GetRequest{
		Span: roachpb.Span{
			Key: rangeDesc.StartKey.AsRawKey(),
		},
	}

	store, err := tc.findMemberStore(hint.StoreID)
	if err != nil {
		return ReplicationTarget{}, err
	}

	_, pErr := client.SendWrappedWith(
		store, nil,
		roachpb.Header{RangeID: rangeDesc.RangeID, Replica: hintReplicaDesc},
		&getReq)
	if pErr != nil {
		if nle, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError); ok {
			if nle.LeaseHolder == nil {
				return ReplicationTarget{}, errors.Errorf(
					"unexpected NotLeaseHolderError with lease holder unknown")
			}
			return ReplicationTarget{
				NodeID: nle.LeaseHolder.NodeID, StoreID: nle.LeaseHolder.StoreID}, nil
		}
		return ReplicationTarget{}, pErr.GoError()
	}
	// The replica we sent the request to either was already or just became
	// the lease holder.
	return *hint, nil
}
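A sketch of how a test might call this helper, assuming a *TestCluster tc and a previously fetched range descriptor; passing a nil hint means the first replica listed in the descriptor will acquire the lease if none is active:

	// Hypothetical usage; tc and rangeDesc are assumed to exist.
	leaseHolder, err := tc.FindRangeLeaseHolder(rangeDesc, nil)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("lease held by node %d, store %d", leaseHolder.NodeID, leaseHolder.StoreID)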
Example 11: fillTestRange
func fillTestRange(t testing.TB, rep *Replica, size int64) {
	src := rand.New(rand.NewSource(0))
	for i := int64(0); i < size/int64(keySize+valSize); i++ {
		key := keys.MakeRowSentinelKey(randutil.RandBytes(src, keySize))
		val := randutil.RandBytes(src, valSize)
		pArgs := putArgs(key, val)
		if _, pErr := client.SendWrappedWith(rep, nil, roachpb.Header{
			RangeID: rangeID,
		}, &pArgs); pErr != nil {
			t.Fatal(pErr)
		}
	}
	rep.mu.Lock()
	after := rep.mu.state.Stats.Total()
	rep.mu.Unlock()
	if after < size {
		t.Fatalf("range not full after filling: wrote %d, but range at %d", size, after)
	}
}
Example 12: process
// process synchronously invokes admin split for each proposed split key.
func (sq *splitQueue) process(
	ctx context.Context,
	now hlc.Timestamp,
	rng *Replica,
	sysCfg config.SystemConfig,
) error {
	// First handle the case of splitting due to zone config maps.
	desc := rng.Desc()
	splitKeys := sysCfg.ComputeSplitKeys(desc.StartKey, desc.EndKey)
	if len(splitKeys) > 0 {
		log.Infof("splitting %s at keys %v", rng, splitKeys)
		log.Trace(ctx, fmt.Sprintf("splitting at keys %v", splitKeys))
		for _, splitKey := range splitKeys {
			if err := sq.db.AdminSplit(splitKey.AsRawKey()); err != nil {
				return errors.Errorf("unable to split %s at key %q: %s", rng, splitKey, err)
			}
		}
		return nil
	}

	// Next handle the case of splitting due to size.
	zone, err := sysCfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		return err
	}
	size := rng.GetMVCCStats().Total()
	// FIXME: why is this implementation not the same as the one above?
	if float64(size)/float64(zone.RangeMaxBytes) > 1 {
		log.Infof("splitting %s size=%d max=%d", rng, size, zone.RangeMaxBytes)
		log.Trace(ctx, fmt.Sprintf("splitting size=%d max=%d", size, zone.RangeMaxBytes))
		if _, pErr := client.SendWrappedWith(rng, ctx, roachpb.Header{
			Timestamp: now,
		}, &roachpb.AdminSplitRequest{
			Span: roachpb.Span{Key: desc.StartKey.AsRawKey()},
		}); pErr != nil {
			return pErr.GoError()
		}
	}
	return nil
}
Example 13: BenchmarkReplicaSnapshot
func BenchmarkReplicaSnapshot(b *testing.B) {
	defer tracing.Disable()()
	defer config.TestingDisableTableSplits()()
	store, stopper, _ := createTestStore(b)
	// We want to manually control the size of the raft log.
	store.DisableRaftLogQueue(true)
	defer stopper.Stop()

	const rangeID = 1
	const keySize = 1 << 7   // 128 B
	const valSize = 1 << 10  // 1 KiB
	const snapSize = 1 << 25 // 32 MiB

	rep, err := store.GetReplica(rangeID)
	if err != nil {
		b.Fatal(err)
	}

	src := rand.New(rand.NewSource(0))
	for i := 0; i < snapSize/(keySize+valSize); i++ {
		key := keys.MakeRowSentinelKey(randutil.RandBytes(src, keySize))
		val := randutil.RandBytes(src, valSize)
		pArgs := putArgs(key, val)
		if _, pErr := client.SendWrappedWith(rep, nil, roachpb.Header{
			RangeID: rangeID,
		}, &pArgs); pErr != nil {
			b.Fatal(pErr)
		}
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := rep.GetSnapshot(); err != nil {
			b.Fatal(err)
		}
	}
}
Example 14: writeRandomDataToRange
func writeRandomDataToRange(
	t testing.TB,
	store *storage.Store,
	rangeID roachpb.RangeID,
	keyPrefix []byte,
) (midpoint []byte) {
	src := rand.New(rand.NewSource(0))
	for i := 0; i < 100; i++ {
		key := append([]byte(nil), keyPrefix...)
		key = append(key, randutil.RandBytes(src, int(src.Int31n(1<<7)))...)
		key = keys.MakeRowSentinelKey(key)
		val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
		pArgs := putArgs(key, val)
		if _, pErr := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
			RangeID: rangeID,
		}, &pArgs); pErr != nil {
			t.Fatal(pErr)
		}
	}
	// Return approximate midway point ("Z" in string "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz").
	midKey := append([]byte(nil), keyPrefix...)
	midKey = append(midKey, []byte("Z")...)
	return keys.MakeRowSentinelKey(midKey)
}
Example 15: TestTxnPutOutOfOrder
//......... portions of the code omitted here .........

	err := store.DB().Put(key, initVal)
	if err != nil {
		t.Fatalf("failed to put: %s", err)
	}

	waitPut := make(chan struct{})
	waitFirstGet := make(chan struct{})
	waitTxnRestart := make(chan struct{})
	waitSecondGet := make(chan struct{})
	waitTxnComplete := make(chan struct{})

	// Start the Writer.
	go func() {
		epoch := -1
		// Start a txn that does read-after-write.
		// The txn will be restarted twice, and the out-of-order put
		// will happen in the second epoch.
		if err := store.DB().Txn(func(txn *client.Txn) error {
			epoch++
			if epoch == 1 {
				// Wait until the second get operation is issued.
				close(waitTxnRestart)
				<-waitSecondGet
			}
			updatedVal := []byte("updatedVal")
			if err := txn.Put(key, updatedVal); err != nil {
				return err
			}
			// Make sure a get will return the value that was just written.
			actual, err := txn.Get(key)
			if err != nil {
				return err
			}
			if !bytes.Equal(actual.ValueBytes(), updatedVal) {
				t.Fatalf("unexpected get result: %s", actual)
			}
			if epoch == 0 {
				// Wait until the first get operation will push the txn timestamp.
				close(waitPut)
				<-waitFirstGet
			}
			b := txn.NewBatch()
			return txn.CommitInBatch(b)
		}); err != nil {
			t.Fatal(err)
		}
		if epoch != 2 {
			t.Fatalf("unexpected number of txn retries: %d", epoch)
		}
		close(waitTxnComplete)
	}()

	<-waitPut

	// Start the Reader.
	// Advance the clock and send a get operation with higher
	// priority to trigger the txn restart.
	manualClock.Increment(100)
	priority := roachpb.UserPriority(-math.MaxInt32)
	requestHeader := roachpb.Span{
		Key: roachpb.Key(key),
	}
	ts := clock.Now()
	if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		Timestamp:    ts,
		UserPriority: priority,
	}, &roachpb.GetRequest{Span: requestHeader}); err != nil {
		t.Fatalf("failed to get: %s", err)
	}

	// Wait until the writer restarts the txn.
	close(waitFirstGet)
	<-waitTxnRestart

	// Advance the clock and send a get operation again. This time
	// we use TestingCommandFilter so that a get operation is not
	// processed after the write intent is resolved (to prevent the
	// timestamp cache from being updated).
	manualClock.Increment(100)
	ts = clock.Now()
	if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		Timestamp:    ts,
		UserPriority: priority,
	}, &roachpb.GetRequest{Span: requestHeader}); err == nil {
		t.Fatal("unexpected success of get")
	}
	close(waitSecondGet)
	<-waitTxnComplete
}