This page collects and summarizes typical usage examples of the MakeV4 function from the Golang package github.com/cockroachdb/cockroach/pkg/util/uuid. If you are wondering what the Golang MakeV4 function does, how to call it, or what it looks like in real code, the curated examples below may help.
The following presents 15 code examples of the MakeV4 function, sorted by popularity by default. Upvoting the examples you like or find useful helps the system recommend better Golang code samples.
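Before the examples, here is a minimal, self-contained sketch of the basic call pattern (an illustrative sketch, not code taken from CockroachDB; the main function and the printed values are assumptions). MakeV4 returns a uuid.UUID value, not a pointer, so the examples below take its address wherever a *uuid.UUID is expected.
package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/util/uuid"
)

func main() {
	id := uuid.MakeV4()        // generate a random, version-4 UUID value
	fmt.Println(id.String())   // canonical textual form
	fmt.Println(id.GetBytes()) // raw byte representation, as used for raft snapshot data in Example 12
	idPtr := &id               // fields such as enginepb.TxnMeta.ID expect a *uuid.UUID
	_ = idPtr
}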
Example 1: TestTimestampCacheEqualTimestamps
// TestTimestampCacheEqualTimestamps verifies that in the event of two
// non-overlapping transactions with equal timestamps, the returned
// timestamp is not owned by either one.
func TestTimestampCacheEqualTimestamps(t *testing.T) {
defer leaktest.AfterTest(t)()
manual := hlc.NewManualClock(123)
clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
tc := newTimestampCache(clock)
txn1 := uuid.MakeV4()
txn2 := uuid.MakeV4()
// Add two non-overlapping transactions at the same timestamp.
ts1 := clock.Now()
tc.add(roachpb.Key("a"), roachpb.Key("b"), ts1, &txn1, true)
tc.add(roachpb.Key("b"), roachpb.Key("c"), ts1, &txn2, true)
// When querying either side separately, the transaction ID is returned.
if ts, txn, _ := tc.GetMaxRead(roachpb.Key("a"), roachpb.Key("b")); !ts.Equal(ts1) {
t.Errorf("expected 'a'-'b' to have timestamp %s, but found %s", ts1, ts)
} else if *txn != txn1 {
t.Errorf("expected 'a'-'b' to have txn id %s, but found %s", txn1, txn)
}
if ts, txn, _ := tc.GetMaxRead(roachpb.Key("b"), roachpb.Key("c")); !ts.Equal(ts1) {
t.Errorf("expected 'b'-'c' to have timestamp %s, but found %s", ts1, ts)
} else if *txn != txn2 {
t.Errorf("expected 'b'-'c' to have txn id %s, but found %s", txn2, txn)
}
// Querying a span that overlaps both returns a nil txn ID; neither
// can proceed here.
if ts, txn, _ := tc.GetMaxRead(roachpb.Key("a"), roachpb.Key("c")); !ts.Equal(ts1) {
t.Errorf("expected 'a'-'c' to have timestamp %s, but found %s", ts1, ts)
} else if txn != nil {
t.Errorf("expected 'a'-'c' to have nil txn id, but found %s", txn)
}
}
Example 2: TestTxnEqual
func TestTxnEqual(t *testing.T) {
u1, u2 := uuid.MakeV4(), uuid.MakeV4()
tc := []struct {
txn1, txn2 *Transaction
eq bool
}{
{nil, nil, true},
{&Transaction{}, nil, false},
{&Transaction{TxnMeta: enginepb.TxnMeta{ID: &u1}}, &Transaction{TxnMeta: enginepb.TxnMeta{ID: &u2}}, false},
}
for i, c := range tc {
if c.txn1.Equal(c.txn2) != c.txn2.Equal(c.txn1) || c.txn1.Equal(c.txn2) != c.eq {
t.Errorf("%d: wanted %t", i, c.eq)
}
}
}
Example 3: createRangeData
// createRangeData creates sample range data in all possible areas of
// the key space. Returns a slice of the encoded keys of all created
// data.
func createRangeData(t *testing.T, r *Replica) []engine.MVCCKey {
ts0 := hlc.ZeroTimestamp
ts := hlc.Timestamp{WallTime: 1}
desc := r.Desc()
keyTSs := []struct {
key roachpb.Key
ts hlc.Timestamp
}{
{keys.AbortCacheKey(r.RangeID, testTxnID), ts0},
{keys.AbortCacheKey(r.RangeID, testTxnID2), ts0},
{keys.RangeFrozenStatusKey(r.RangeID), ts0},
{keys.RangeLastGCKey(r.RangeID), ts0},
{keys.RaftAppliedIndexKey(r.RangeID), ts0},
{keys.RaftTruncatedStateKey(r.RangeID), ts0},
{keys.RangeLeaseKey(r.RangeID), ts0},
{keys.LeaseAppliedIndexKey(r.RangeID), ts0},
{keys.RangeStatsKey(r.RangeID), ts0},
{keys.RangeTxnSpanGCThresholdKey(r.RangeID), ts0},
{keys.RaftHardStateKey(r.RangeID), ts0},
{keys.RaftLastIndexKey(r.RangeID), ts0},
{keys.RaftLogKey(r.RangeID, 1), ts0},
{keys.RaftLogKey(r.RangeID, 2), ts0},
{keys.RangeLastReplicaGCTimestampKey(r.RangeID), ts0},
{keys.RangeLastVerificationTimestampKeyDeprecated(r.RangeID), ts0},
{keys.RangeDescriptorKey(desc.StartKey), ts},
{keys.TransactionKey(roachpb.Key(desc.StartKey), uuid.MakeV4()), ts0},
{keys.TransactionKey(roachpb.Key(desc.StartKey.Next()), uuid.MakeV4()), ts0},
{keys.TransactionKey(fakePrevKey(desc.EndKey), uuid.MakeV4()), ts0},
// TODO(bdarnell): KeyMin.Next() results in a key in the reserved system-local space.
// Once we have resolved https://github.com/cockroachdb/cockroach/issues/437,
// replace this with something that reliably generates the first valid key in the range.
//{r.Desc().StartKey.Next(), ts},
// The following line is similar to StartKey.Next() but adds more to the key to
// avoid falling into the system-local space.
{append(append([]byte{}, desc.StartKey...), '\x02'), ts},
{fakePrevKey(r.Desc().EndKey), ts},
}
keys := []engine.MVCCKey{}
for _, keyTS := range keyTSs {
if err := engine.MVCCPut(context.Background(), r.store.Engine(), nil, keyTS.key, keyTS.ts, roachpb.MakeValueFromString("value"), nil); err != nil {
t.Fatal(err)
}
keys = append(keys, engine.MVCCKey{Key: keyTS.key, Timestamp: keyTS.ts})
}
return keys
}
Example 4: TestTxnIDEqual
func TestTxnIDEqual(t *testing.T) {
txn1, txn2 := uuid.MakeV4(), uuid.MakeV4()
txn1Copy := txn1
testCases := []struct {
a, b *uuid.UUID
expEqual bool
}{
{&txn1, &txn1, true},
{&txn1, &txn2, false},
{&txn1, &txn1Copy, true},
}
for i, test := range testCases {
if eq := TxnIDEqual(test.a, test.b); eq != test.expEqual {
t.Errorf("%d: expected %q == %q: %t; got %t", i, test.a, test.b, test.expEqual, eq)
}
}
}
Example 5: TestCloneProto
func TestCloneProto(t *testing.T) {
u := uuid.MakeV4()
testCases := []struct {
pb proto.Message
shouldPanic bool
}{
{&roachpb.StoreIdent{}, false},
{&roachpb.StoreIdent{ClusterID: uuid.MakeV4()}, true},
{&enginepb.TxnMeta{}, false},
{&enginepb.TxnMeta{ID: &u}, true},
{&roachpb.Transaction{}, false},
{&config.ZoneConfig{RangeMinBytes: 123, RangeMaxBytes: 456}, false},
}
for _, tc := range testCases {
var clone proto.Message
var panicObj interface{}
func() {
defer func() {
panicObj = recover()
}()
clone = protoutil.Clone(tc.pb)
}()
if tc.shouldPanic {
if panicObj == nil {
t.Errorf("%T: expected panic but didn't get one", tc.pb)
}
} else {
if panicObj != nil {
if panicStr := fmt.Sprint(panicObj); !strings.Contains(panicStr, "attempt to clone") {
t.Errorf("%T: got unexpected panic %s", tc.pb, panicStr)
}
}
}
if panicObj == nil {
realClone := proto.Clone(tc.pb)
if !reflect.DeepEqual(clone, realClone) {
t.Errorf("%T: clone did not equal original. expected:\n%+v\ngot:\n%+v", tc.pb, realClone, clone)
}
}
}
}
Example 6: TestKeyAddress
func TestKeyAddress(t *testing.T) {
testCases := []struct {
key roachpb.Key
expAddress roachpb.RKey
}{
{roachpb.Key{}, roachpb.RKeyMin},
{roachpb.Key("123"), roachpb.RKey("123")},
{RangeDescriptorKey(roachpb.RKey("foo")), roachpb.RKey("foo")},
{TransactionKey(roachpb.Key("baz"), uuid.MakeV4()), roachpb.RKey("baz")},
{TransactionKey(roachpb.KeyMax, uuid.MakeV4()), roachpb.RKeyMax},
{RangeDescriptorKey(roachpb.RKey(TransactionKey(roachpb.Key("doubleBaz"), uuid.MakeV4()))), roachpb.RKey("doubleBaz")},
{nil, nil},
}
for i, test := range testCases {
if keyAddr, err := Addr(test.key); err != nil {
t.Errorf("%d: %v", i, err)
} else if !keyAddr.Equal(test.expAddress) {
t.Errorf("%d: expected address for key %q doesn't match %q", i, test.key, test.expAddress)
}
}
}
Example 7: NewLeaseManager
// NewLeaseManager allocates a new LeaseManager.
func NewLeaseManager(db *DB, clock *hlc.Clock, options LeaseManagerOptions) *LeaseManager {
if options.ClientID == "" {
options.ClientID = uuid.MakeV4().String()
}
if options.LeaseDuration <= 0 {
options.LeaseDuration = DefaultLeaseDuration
}
return &LeaseManager{
db: db,
clock: clock,
clientID: options.ClientID,
leaseDuration: options.LeaseDuration,
}
}
Example 8: TestTimestampCacheReadVsWrite
// TestTimestampCacheReadVsWrite verifies that the timestamp cache
// can differentiate between read and write timestamps.
func TestTimestampCacheReadVsWrite(t *testing.T) {
defer leaktest.AfterTest(t)()
manual := hlc.NewManualClock(123)
clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
tc := newTimestampCache(clock)
// Add read-only non-txn entry at current time.
ts1 := clock.Now()
tc.add(roachpb.Key("a"), roachpb.Key("b"), ts1, nil, true)
// Add two successive txn entries; one read-only and one read-write.
txn1ID := uuid.MakeV4()
txn2ID := uuid.MakeV4()
ts2 := clock.Now()
tc.add(roachpb.Key("a"), nil, ts2, &txn1ID, true)
ts3 := clock.Now()
tc.add(roachpb.Key("a"), nil, ts3, &txn2ID, false)
rTS, _, rOK := tc.GetMaxRead(roachpb.Key("a"), nil)
wTS, _, wOK := tc.GetMaxWrite(roachpb.Key("a"), nil)
if !rTS.Equal(ts2) || !wTS.Equal(ts3) || !rOK || !wOK {
t.Errorf("expected %s %s; got %s %s; rOK=%t, wOK=%t", ts2, ts3, rTS, wTS, rOK, wOK)
}
}
Example 9: TestKeyAddressError
func TestKeyAddressError(t *testing.T) {
testCases := map[string][]roachpb.Key{
"store-local key .* is not addressable": {
StoreIdentKey(),
StoreGossipKey(),
},
"local range ID key .* is not addressable": {
AbortCacheKey(0, uuid.MakeV4()),
RaftTombstoneKey(0),
RaftAppliedIndexKey(0),
RaftTruncatedStateKey(0),
RangeLeaseKey(0),
RangeStatsKey(0),
RaftHardStateKey(0),
RaftLastIndexKey(0),
RaftLogPrefix(0),
RaftLogKey(0, 0),
RangeLastReplicaGCTimestampKey(0),
RangeLastVerificationTimestampKeyDeprecated(0),
RangeDescriptorKey(roachpb.RKey(RangeLastVerificationTimestampKeyDeprecated(0))),
},
"local key .* malformed": {
makeKey(localPrefix, roachpb.Key("z")),
},
}
for regexp, keyList := range testCases {
for _, key := range keyList {
if addr, err := Addr(key); err == nil {
t.Errorf("expected addressing key %q to throw error, but it returned address %q",
key, addr)
} else if !testutils.IsError(err, regexp) {
t.Errorf("expected addressing key %q to throw error matching %s, but got error %v",
key, regexp, err)
}
}
}
}
Example 10: NewTransaction
// NewTransaction creates a new transaction. The transaction key is
// composed using the specified baseKey (for locality with data
// affected by the transaction) and a random ID to guarantee
// uniqueness. The specified user-level priority is combined with a
// randomly chosen value to yield a final priority, used to settle
// write conflicts in a way that avoids starvation of long-running
// transactions (see Replica.PushTxn).
func NewTransaction(
name string,
baseKey Key,
userPriority UserPriority,
isolation enginepb.IsolationType,
now hlc.Timestamp,
maxOffset int64,
) *Transaction {
u := uuid.MakeV4()
return &Transaction{
TxnMeta: enginepb.TxnMeta{
Key: baseKey,
ID: &u,
Isolation: isolation,
Timestamp: now,
Priority: MakePriority(userPriority),
Sequence: 1,
},
Name: name,
OrigTimestamp: now,
MaxTimestamp: now.Add(maxOffset, 0),
}
}
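For reference, a caller outside the roachpb package might invoke NewTransaction roughly as follows. This is a hedged sketch based only on the signature shown above: the clock setup mirrors the tests earlier on this page, and NormalUserPriority, SERIALIZABLE, and the helper name are illustrative assumptions, not taken from this example.
// newExampleTxn is a hypothetical helper, not part of CockroachDB. Assumed imports:
// time, github.com/cockroachdb/cockroach/pkg/roachpb,
// github.com/cockroachdb/cockroach/pkg/storage/engine/enginepb,
// github.com/cockroachdb/cockroach/pkg/util/hlc.
func newExampleTxn() *roachpb.Transaction {
	manual := hlc.NewManualClock(123)
	clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
	return roachpb.NewTransaction(
		"example-txn",               // name, used for debugging only
		roachpb.Key("a"),            // baseKey: places the txn record near the data it touches
		roachpb.NormalUserPriority,  // user-level priority, combined with a random component
		enginepb.SERIALIZABLE,       // isolation level
		clock.Now(),                 // current HLC timestamp, becomes OrigTimestamp
		int64(500*time.Millisecond), // maxOffset in nanoseconds, bounds MaxTimestamp
	)
}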
Example 11: TestTransactionUpdate
func TestTransactionUpdate(t *testing.T) {
txn := nonZeroTxn
if err := zerofields.NoZeroField(txn); err != nil {
t.Fatal(err)
}
var txn2 Transaction
txn2.Update(&txn)
if err := zerofields.NoZeroField(txn2); err != nil {
t.Fatal(err)
}
u := uuid.MakeV4()
var txn3 Transaction
txn3.ID = &u
txn3.Name = "carl"
txn3.Isolation = enginepb.SNAPSHOT
txn3.Update(&txn)
if err := zerofields.NoZeroField(txn3); err != nil {
t.Fatal(err)
}
}
Example 12: snapshot
// snapshot creates an OutgoingSnapshot containing a rocksdb snapshot for the given range.
func snapshot(
ctx context.Context,
snapType string,
snap engine.Reader,
rangeID roachpb.RangeID,
eCache *raftEntryCache,
startKey roachpb.RKey,
) (OutgoingSnapshot, error) {
var desc roachpb.RangeDescriptor
// We ignore intents on the range descriptor (consistent=false) because we
// know they cannot be committed yet; operations that modify range
// descriptors resolve their own intents when they commit.
ok, err := engine.MVCCGetProto(ctx, snap, keys.RangeDescriptorKey(startKey),
hlc.MaxTimestamp, false /* !consistent */, nil, &desc)
if err != nil {
return OutgoingSnapshot{}, errors.Errorf("failed to get desc: %s", err)
}
if !ok {
return OutgoingSnapshot{}, errors.Errorf("couldn't find range descriptor")
}
var snapData roachpb.RaftSnapshotData
// Store RangeDescriptor as metadata, it will be retrieved by ApplySnapshot()
snapData.RangeDescriptor = desc
// Read the range metadata from the snapshot instead of the members
// of the Range struct because they might be changed concurrently.
appliedIndex, _, err := loadAppliedIndex(ctx, snap, rangeID)
if err != nil {
return OutgoingSnapshot{}, err
}
// Synthesize our raftpb.ConfState from desc.
var cs raftpb.ConfState
for _, rep := range desc.Replicas {
cs.Nodes = append(cs.Nodes, uint64(rep.ReplicaID))
}
term, err := term(ctx, snap, rangeID, eCache, appliedIndex)
if err != nil {
return OutgoingSnapshot{}, errors.Errorf("failed to fetch term of %d: %s", appliedIndex, err)
}
state, err := loadState(ctx, snap, &desc)
if err != nil {
return OutgoingSnapshot{}, err
}
// Intentionally let this iterator and the snapshot escape so that the
// streamer can send chunks from it bit by bit.
iter := NewReplicaDataIterator(&desc, snap, true /* replicatedOnly */)
snapUUID := uuid.MakeV4()
log.Infof(ctx, "generated %s snapshot %s at index %d",
snapType, snapUUID.Short(), appliedIndex)
return OutgoingSnapshot{
EngineSnap: snap,
Iter: iter,
State: state,
SnapUUID: snapUUID,
RaftSnap: raftpb.Snapshot{
Data: snapUUID.GetBytes(),
Metadata: raftpb.SnapshotMetadata{
Index: appliedIndex,
Term: term,
ConfState: cs,
},
},
}, nil
}
Example 13: TestTruncate
//......... (part of the code is omitted here) .........
{
// Range-local range contained in active range.
keys: [][2]string{{locPrefix("b"), loc("b")}},
expKeys: [][2]string{{locPrefix("b"), loc("b")}},
from: "b", to: "c",
},
{
// Mixed range-local vs global key range.
keys: [][2]string{{loc("c"), "d\x00"}},
from: "b", to: "e",
err: "local key mixed with global key",
},
{
// Key range touching and intersecting active range.
keys: [][2]string{{"a", "b"}, {"a", "c"}, {"p", "q"}, {"p", "r"}, {"a", "z"}},
expKeys: [][2]string{{}, {"b", "c"}, {"p", "q"}, {"p", "q"}, {"b", "q"}},
from: "b", to: "q",
},
// Active key range is intersection of descriptor and [from,to).
{
keys: [][2]string{{"c", "q"}},
expKeys: [][2]string{{"d", "p"}},
from: "a", to: "z",
desc: [2]string{"d", "p"},
},
{
keys: [][2]string{{"c", "q"}},
expKeys: [][2]string{{"d", "p"}},
from: "d", to: "p",
desc: [2]string{"a", "z"},
},
}
for i, test := range testCases {
goldenOriginal := roachpb.BatchRequest{}
for _, ks := range test.keys {
if len(ks[1]) > 0 {
u := uuid.MakeV4()
goldenOriginal.Add(&roachpb.ResolveIntentRangeRequest{
Span: roachpb.Span{Key: roachpb.Key(ks[0]), EndKey: roachpb.Key(ks[1])},
IntentTxn: enginepb.TxnMeta{ID: &u},
})
} else {
goldenOriginal.Add(&roachpb.GetRequest{
Span: roachpb.Span{Key: roachpb.Key(ks[0])},
})
}
}
original := roachpb.BatchRequest{Requests: make([]roachpb.RequestUnion, len(goldenOriginal.Requests))}
for i, request := range goldenOriginal.Requests {
original.Requests[i].SetValue(request.GetInner().ShallowCopy())
}
desc := &roachpb.RangeDescriptor{
StartKey: roachpb.RKey(test.desc[0]), EndKey: roachpb.RKey(test.desc[1]),
}
if len(desc.StartKey) == 0 {
desc.StartKey = roachpb.RKey(test.from)
}
if len(desc.EndKey) == 0 {
desc.EndKey = roachpb.RKey(test.to)
}
rs := roachpb.RSpan{Key: roachpb.RKey(test.from), EndKey: roachpb.RKey(test.to)}
rs, err := rs.Intersect(desc)
if err != nil {
t.Errorf("%d: intersection failure: %v", i, err)
continue
}
ba, num, err := truncate(original, rs)
if err != nil || test.err != "" {
if !testutils.IsError(err, test.err) {
t.Errorf("%d: %v (expected: %q)", i, err, test.err)
}
continue
}
var reqs int
for j, arg := range ba.Requests {
req := arg.GetInner()
if _, ok := req.(*roachpb.NoopRequest); ok {
continue
}
if h := req.Header(); !bytes.Equal(h.Key, roachpb.Key(test.expKeys[j][0])) || !bytes.Equal(h.EndKey, roachpb.Key(test.expKeys[j][1])) {
t.Errorf("%d.%d: range mismatch: actual [%q,%q), wanted [%q,%q)", i, j,
h.Key, h.EndKey, test.expKeys[j][0], test.expKeys[j][1])
} else if _, ok := req.(*roachpb.NoopRequest); ok != (len(h.Key) == 0) {
t.Errorf("%d.%d: expected NoopRequest, got %T", i, j, req)
} else if len(h.Key) != 0 {
reqs++
}
}
if reqs != num {
t.Errorf("%d: counted %d requests, but truncation indicated %d", i, reqs, num)
}
if !reflect.DeepEqual(original, goldenOriginal) {
t.Errorf("%d: truncation mutated original:\nexpected: %s\nactual: %s",
i, goldenOriginal, original)
}
}
}
Example 14: PlanAndRun
// PlanAndRun generates a physical plan from a planNode tree and executes it. It
// assumes that the tree is supported (see CheckSupport).
//
// Note that errors that happen while actually running the flow are reported to
// recv, not returned by this function.
func (dsp *distSQLPlanner) PlanAndRun(
ctx context.Context, txn *client.Txn, tree planNode, recv *distSQLReceiver,
) error {
// Trigger limit propagation.
tree.SetLimitHint(math.MaxInt64, true)
planCtx := planningCtx{
ctx: ctx,
spanIter: dsp.spanResolver.NewSpanResolverIterator(),
nodeAddresses: make(map[roachpb.NodeID]string),
}
thisNodeID := dsp.nodeDesc.NodeID
planCtx.nodeAddresses[thisNodeID] = dsp.nodeDesc.Address.String()
plan, err := dsp.createPlanForNode(&planCtx, tree)
if err != nil {
return err
}
// If we don't already have a single result router on this node, add a final
// stage.
if len(plan.resultRouters) != 1 ||
plan.processors[plan.resultRouters[0]].node != thisNodeID {
dsp.addSingleGroupStage(
&plan, thisNodeID, distsql.ProcessorCoreUnion{Noop: &distsql.NoopCoreSpec{}},
)
if len(plan.resultRouters) != 1 {
panic(fmt.Sprintf("%d results after single group stage", len(plan.resultRouters)))
}
}
// Set up the endpoints for p.streams.
dsp.populateEndpoints(&planCtx, &plan)
// Set up the endpoint for the final result.
finalOut := &plan.processors[plan.resultRouters[0]].spec.Output[0]
finalOut.Streams = append(finalOut.Streams, distsql.StreamEndpointSpec{
Type: distsql.StreamEndpointSpec_SYNC_RESPONSE,
})
recv.resultToStreamColMap = plan.planToStreamColMap
// Split the processors by nodeID to create the FlowSpecs.
flowID := distsql.FlowID{UUID: uuid.MakeV4()}
nodeIDMap := make(map[roachpb.NodeID]int)
// nodeAddresses contains addresses for the nodes that were referenced during
// planning, so we're likely going to have this many nodes (and we have one
// flow per node).
nodeIDs := make([]roachpb.NodeID, 0, len(planCtx.nodeAddresses))
flows := make([]distsql.FlowSpec, 0, len(planCtx.nodeAddresses))
for _, p := range plan.processors {
idx, ok := nodeIDMap[p.node]
if !ok {
flow := distsql.FlowSpec{FlowID: flowID}
idx = len(flows)
flows = append(flows, flow)
nodeIDs = append(nodeIDs, p.node)
nodeIDMap[p.node] = idx
}
flows[idx].Processors = append(flows[idx].Processors, p.spec)
}
// Start the flows on all other nodes.
for i, nodeID := range nodeIDs {
if nodeID == thisNodeID {
// Skip this node.
continue
}
req := distsql.SetupFlowRequest{
Txn: txn.Proto,
Flow: flows[i],
}
if err := distsql.SetFlowRequestTrace(ctx, &req); err != nil {
return err
}
conn, err := dsp.rpcContext.GRPCDial(planCtx.nodeAddresses[nodeID])
if err != nil {
return err
}
client := distsql.NewDistSQLClient(conn)
// TODO(radu): we are not waiting for the flows to complete, but we are
// still waiting for a round trip; we should start the flows in parallel, at
// least if there are enough of them.
if resp, err := client.SetupFlow(context.Background(), &req); err != nil {
return err
} else if resp.Error != nil {
return resp.Error.GoError()
}
}
localReq := distsql.SetupFlowRequest{
Txn: txn.Proto,
Flow: flows[nodeIDMap[thisNodeID]],
}
if err := distsql.SetFlowRequestTrace(ctx, &localReq); err != nil {
//......... (part of the code is omitted here) .........
Example 15: TestTxnCoordSenderErrorWithIntent
// TestTxnCoordSenderErrorWithIntent validates that if a transactional request
// returns an error but also indicates a Writing transaction, the coordinator
// tracks it just like a successful request.
func TestTxnCoordSenderErrorWithIntent(t *testing.T) {
defer leaktest.AfterTest(t)()
stopper := stop.NewStopper()
defer stopper.Stop()
manual := hlc.NewManualClock(123)
clock := hlc.NewClock(manual.UnixNano, 20*time.Nanosecond)
u := uuid.MakeV4()
testCases := []struct {
roachpb.Error
errMsg string
}{
{*roachpb.NewError(roachpb.NewTransactionRetryError()), "retry txn"},
{
*roachpb.NewError(roachpb.NewTransactionPushError(roachpb.Transaction{
TxnMeta: enginepb.TxnMeta{ID: &u}}),
), "failed to push",
},
{*roachpb.NewErrorf("testError"), "testError"},
}
for i, test := range testCases {
func() {
senderFunc := func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
txn := ba.Txn.Clone()
txn.Writing = true
pErr := &roachpb.Error{}
*pErr = test.Error
pErr.SetTxn(&txn)
return nil, pErr
}
ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
ts := NewTxnCoordSender(
ambient,
senderFn(senderFunc),
clock,
false,
stopper,
MakeTxnMetrics(metric.TestSampleInterval),
)
var ba roachpb.BatchRequest
key := roachpb.Key("test")
ba.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: key}})
ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: key}})
ba.Add(&roachpb.EndTransactionRequest{})
ba.Txn = &roachpb.Transaction{Name: "test"}
_, pErr := ts.Send(context.Background(), ba)
if !testutils.IsPError(pErr, test.errMsg) {
t.Errorf("%d: error did not match %s: %v", i, test.errMsg, pErr)
}
defer teardownHeartbeats(ts)
ts.Lock()
defer ts.Unlock()
if len(ts.txns) != 1 {
t.Errorf("%d: expected transaction to be tracked", i)
}
}()
}
}