This article collects typical usage examples of the Golang method github.com/cockroachdb/cockroach/roachpb.BatchRequest.Txn. If you have been wondering what BatchRequest.Txn does, how it is used, or what it looks like in practice, the curated examples below may help. You can also explore further usage examples of the containing type, github.com/cockroachdb/cockroach/roachpb.BatchRequest.
The following presents 15 code examples of the BatchRequest.Txn method, sorted by popularity by default.
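All of the examples share one core pattern: populate a roachpb.BatchRequest, point its Txn field at a roachpb.Transaction, and hand the batch to a Sender. Below is a minimal sketch of that pattern, distilled from the examples that follow; the sender variable is a hypothetical stand-in for a concrete Sender (the examples construct their own), not part of any example.

// A minimal sketch, assuming some client-side Sender named sender exists.
var ba roachpb.BatchRequest
key := roachpb.Key("test")
// Batch the writing portion of a transaction together.
ba.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: key}})
ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: key}})
ba.Add(&roachpb.EndTransactionRequest{})
// Attach the transaction the batch should run in.
ba.Txn = &roachpb.Transaction{Name: "example"}
if _, pErr := sender.Send(context.Background(), ba); pErr != nil {
	// Inspect the returned *roachpb.Error here.
}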
Example 1: TestTxnCoordSenderErrorWithIntent
// TestTxnCoordSenderErrorWithIntent validates that if a transactional request
// returns an error but also indicates a Writing transaction, the coordinator
// tracks it just like a successful request.
func TestTxnCoordSenderErrorWithIntent(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(20)
	ts := NewTxnCoordSender(senderFn(func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		txn := ba.Txn.Clone()
		txn.Writing = true
		pErr := roachpb.NewError(roachpb.NewTransactionRetryError())
		pErr.SetTxn(txn)
		return nil, pErr
	}), clock, false, nil, stopper)
	defer stopper.Stop()
	var ba roachpb.BatchRequest
	key := roachpb.Key("test")
	ba.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: key}})
	ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: key}})
	ba.Add(&roachpb.EndTransactionRequest{})
	ba.Txn = &roachpb.Transaction{Name: "test"}
	if _, pErr := ts.Send(context.Background(), ba); !testutils.IsPError(pErr, "retry txn") {
		t.Fatalf("unexpected error: %v", pErr)
	}
	defer teardownHeartbeats(ts)
	ts.Lock()
	defer ts.Unlock()
	if len(ts.txns) != 1 {
		t.Fatalf("expected transaction to be tracked")
	}
}
Example 2: TestTxnCoordSenderSingleRoundtripTxn
// TestTxnCoordSenderSingleRoundtripTxn checks that a batch which completely
// holds the writing portion of a Txn (including EndTransaction) does not
// launch a heartbeat goroutine at all.
func TestTxnCoordSenderSingleRoundtripTxn(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(20)
	ts := NewTxnCoordSender(senderFn(func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		br := ba.CreateReply()
		br.Txn = ba.Txn.Clone()
		br.Txn.Writing = true
		return br, nil
	}), clock, false, nil, stopper)
	// Stop the stopper manually, prior to trying the transaction. This has the
	// effect of returning a NodeUnavailableError for any attempts at launching
	// a heartbeat goroutine.
	stopper.Stop()
	var ba roachpb.BatchRequest
	key := roachpb.Key("test")
	ba.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: key}})
	ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: key}})
	ba.Add(&roachpb.EndTransactionRequest{})
	ba.Txn = &roachpb.Transaction{Name: "test"}
	_, pErr := ts.Send(context.Background(), ba)
	if pErr != nil {
		t.Fatal(pErr)
	}
}
Example 3: Send
func (ts *txnSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	// Send call through wrapped sender.
	ba.Txn = &ts.Proto
	ba.SetNewRequest()
	br, pErr := ts.wrapped.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(ts.wrapped, br))
	}
	// Only successful requests can carry an updated Txn in their response
	// header. Any error (e.g. a restart) can have a Txn attached to them as
	// well; those update our local state in the same way for the next attempt.
	// The exception is if our transaction was aborted and needs to restart
	// from scratch, in which case we do just that.
	if pErr == nil {
		ts.Proto.Update(br.Txn)
		return br, nil
	} else if _, ok := pErr.GoError().(*roachpb.TransactionAbortedError); ok {
		// On Abort, reset the transaction so we start anew on restart.
		ts.Proto = roachpb.Transaction{
			Name:      ts.Proto.Name,
			Isolation: ts.Proto.Isolation,
		}
		// Acts as a minimum priority on restart.
		if pErr.GetTxn() != nil {
			ts.Proto.Priority = pErr.GetTxn().Priority
		}
	} else if pErr.TransactionRestart != roachpb.TransactionRestart_ABORT {
		ts.Proto.Update(pErr.GetTxn())
	}
	return nil, pErr
}
Example 4: Send
func (ts *txnSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	// Send call through wrapped sender.
	ba.Txn = &ts.Proto
	br, pErr := ts.wrapped.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(ts.wrapped, br))
	}
	// TODO(tschottdorf): see about using only the top-level *roachpb.Error
	// information for this restart logic (includes adding the Txn).
	err := pErr.GoError()
	// Only successful requests can carry an updated Txn in their response
	// header. Any error (e.g. a restart) can have a Txn attached to them as
	// well; those update our local state in the same way for the next attempt.
	// The exception is if our transaction was aborted and needs to restart
	// from scratch, in which case we do just that.
	if err == nil {
		ts.Proto.Update(br.Txn)
		return br, nil
	} else if abrtErr, ok := err.(*roachpb.TransactionAbortedError); ok {
		// On Abort, reset the transaction so we start anew on restart.
		ts.Proto = roachpb.Transaction{
			Name:      ts.Proto.Name,
			Isolation: ts.Proto.Isolation,
		}
		if abrtTxn := abrtErr.Transaction(); abrtTxn != nil {
			// Acts as a minimum priority on restart.
			ts.Proto.Priority = abrtTxn.Priority
		}
	} else if txnErr, ok := err.(roachpb.TransactionRestartError); ok {
		ts.Proto.Update(txnErr.Transaction())
	}
	return nil, pErr
}
Example 5: Send
// Send implements the client.Sender interface. The store is looked up from the
// store map if specified by the request; otherwise, the command is being
// executed locally, and the replica is determined via lookup through each
// store's LookupRange method. The latter path is taken only by unit tests.
func (ls *Stores) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	var store *Store
	var err error
	// If we aren't given a Replica, then a little bending over
	// backwards here. This case applies exclusively to unittests.
	if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
		var repl *roachpb.ReplicaDescriptor
		var rangeID roachpb.RangeID
		rs := keys.Range(ba)
		rangeID, repl, err = ls.lookupReplica(rs.Key, rs.EndKey)
		if err == nil {
			ba.RangeID = rangeID
			ba.Replica = *repl
		}
	}
	ctx = log.Add(ctx,
		log.RangeID, ba.RangeID)
	if err == nil {
		store, err = ls.GetStore(ba.Replica.StoreID)
	}
	if err != nil {
		return nil, roachpb.NewError(err)
	}
	sp, cleanupSp := tracing.SpanFromContext(opStores, store.Tracer(), ctx)
	defer cleanupSp()
	if ba.Txn != nil {
		// For calls that read data within a txn, we keep track of timestamps
		// observed from the various participating nodes' HLC clocks. If we have
		// a timestamp on file for this Node which is smaller than MaxTimestamp,
		// we can lower MaxTimestamp accordingly. If MaxTimestamp drops below
		// OrigTimestamp, we effectively can't see uncertainty restarts any
		// more.
		// Note that it's not an issue if MaxTimestamp propagates back out to
		// the client via a returned Transaction update - when updating a Txn
		// from another, the larger MaxTimestamp wins.
		if maxTS, ok := ba.Txn.GetObservedTimestamp(ba.Replica.NodeID); ok && maxTS.Less(ba.Txn.MaxTimestamp) {
			// Copy-on-write to protect others we might be sharing the Txn with.
			shallowTxn := *ba.Txn
			// The uncertainty window is [OrigTimestamp, maxTS), so if that window
			// is empty, there won't be any uncertainty restarts.
			if !ba.Txn.OrigTimestamp.Less(maxTS) {
				sp.LogEvent("read has no clock uncertainty")
			}
			shallowTxn.MaxTimestamp.Backward(maxTS)
			ba.Txn = &shallowTxn
		}
	}
	br, pErr := store.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(store, br))
	}
	return br, pErr
}
Example 6: heartbeat
func (tc *TxnCoordSender) heartbeat(id string, trace *tracer.Trace, ctx context.Context) bool {
	tc.Lock()
	proceed := true
	txnMeta := tc.txns[id]
	// Before we send a heartbeat, determine whether this transaction
	// should be considered abandoned. If so, exit heartbeat.
	if txnMeta.hasClientAbandonedCoord(tc.clock.PhysicalNow()) {
		// TODO(tschottdorf): should we be more proactive here?
		// The client might be continuing the transaction
		// through another coordinator, but in the most likely
		// case it's just gone and the open transaction record
		// could block concurrent operations.
		if log.V(1) {
			log.Infof("transaction %s abandoned; stopping heartbeat",
				txnMeta.txn)
		}
		proceed = false
	}
	// txnMeta.txn is possibly replaced concurrently,
	// so grab a copy before unlocking.
	txn := txnMeta.txn
	tc.Unlock()
	if !proceed {
		return false
	}
	hb := &roachpb.HeartbeatTxnRequest{}
	hb.Key = txn.Key
	ba := roachpb.BatchRequest{}
	ba.Timestamp = tc.clock.Now()
	ba.CmdID = ba.GetOrCreateCmdID(ba.Timestamp.WallTime)
	ba.Txn = txn.Clone()
	ba.Add(hb)
	epochEnds := trace.Epoch("heartbeat")
	_, err := tc.wrapped.Send(ctx, ba)
	epochEnds()
	// If the transaction is not in pending state, then we can stop
	// the heartbeat. It's either aborted or committed, and we resolve
	// write intents accordingly.
	if err != nil {
		log.Warningf("heartbeat to %s failed: %s", txn, err)
	}
	// TODO(bdarnell): once we have gotten a heartbeat response with
	// Status != PENDING, future heartbeats are useless. However, we
	// need to continue the heartbeatLoop until the client either
	// commits or abandons the transaction. We could save a little
	// pointless work by restructuring this loop to stop sending
	// heartbeats between the time that the transaction is aborted and
	// the client finds out. Furthermore, we could use this information
	// to send TransactionAbortedErrors to the client so it can restart
	// immediately instead of running until its EndTransaction.
	return true
}
Example 7: Send
// Send implements the client.Sender interface. The store is looked up from the
// store map if specified by the request; otherwise, the command is being
// executed locally, and the replica is determined via lookup through each
// store's LookupRange method. The latter path is taken only by unit tests.
func (ls *Stores) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	sp := tracing.SpanFromContext(ctx)
	var store *Store
	var pErr *roachpb.Error
	// If we aren't given a Replica, then a little bending over
	// backwards here. This case applies exclusively to unittests.
	if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
		var repl *roachpb.ReplicaDescriptor
		var rangeID roachpb.RangeID
		rs := keys.Range(ba)
		rangeID, repl, pErr = ls.lookupReplica(rs.Key, rs.EndKey)
		if pErr == nil {
			ba.RangeID = rangeID
			ba.Replica = *repl
		}
	}
	ctx = log.Add(ctx,
		log.RangeID, ba.RangeID)
	if pErr == nil {
		store, pErr = ls.GetStore(ba.Replica.StoreID)
	}
	var br *roachpb.BatchResponse
	if pErr != nil {
		return nil, pErr
	}
	// For calls that read data within a txn, we can avoid uncertainty
	// related retries in certain situations. If the node is in
	// "CertainNodes", we need not worry about uncertain reads any
	// more. Setting MaxTimestamp=OrigTimestamp for the operation
	// accomplishes that. See roachpb.Transaction.CertainNodes for details.
	if ba.Txn != nil && ba.Txn.CertainNodes.Contains(ba.Replica.NodeID) {
		// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
		sp.LogEvent("read has no clock uncertainty")
		// Copy-on-write to protect others we might be sharing the Txn with.
		shallowTxn := *ba.Txn
		// We set to OrigTimestamp because that works for both SNAPSHOT and
		// SERIALIZABLE: If we used Timestamp instead, we could run into
		// unnecessary retries at SNAPSHOT. For example, a SNAPSHOT txn at
		// OrigTimestamp = 1000.0, Timestamp = 2000.0, MaxTimestamp = 3000.0
		// will always read at 1000, so a MaxTimestamp of 2000 will still let
		// it restart with uncertainty when it finds a value in (1000, 2000).
		shallowTxn.MaxTimestamp = ba.Txn.OrigTimestamp
		ba.Txn = &shallowTxn
	}
	br, pErr = store.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(store, br))
	}
	return br, pErr
}
Example 8: TestTxnCoordSenderErrorWithIntent
// TestTxnCoordSenderErrorWithIntent validates that if a transactional request
// returns an error but also indicates a Writing transaction, the coordinator
// tracks it just like a successful request.
func TestTxnCoordSenderErrorWithIntent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(20)
	testCases := []struct {
		roachpb.Error
		errMsg string
	}{
		{*roachpb.NewError(roachpb.NewTransactionRetryError()), "retry txn"},
		{*roachpb.NewError(roachpb.NewTransactionPushError(roachpb.Transaction{
			TxnMeta: enginepb.TxnMeta{
				ID: uuid.NewV4(),
			}})), "failed to push"},
		{*roachpb.NewErrorf("testError"), "testError"},
	}
	for i, test := range testCases {
		func() {
			senderFunc := func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
				txn := ba.Txn.Clone()
				txn.Writing = true
				pErr := &roachpb.Error{}
				*pErr = test.Error
				pErr.SetTxn(&txn)
				return nil, pErr
			}
			ctx := tracing.WithTracer(context.Background(), tracing.NewTracer())
			ts := NewTxnCoordSender(ctx, senderFn(senderFunc), clock, false, stopper, MakeTxnMetrics())
			var ba roachpb.BatchRequest
			key := roachpb.Key("test")
			ba.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: key}})
			ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: key}})
			ba.Add(&roachpb.EndTransactionRequest{})
			ba.Txn = &roachpb.Transaction{Name: "test"}
			_, pErr := ts.Send(context.Background(), ba)
			if !testutils.IsPError(pErr, test.errMsg) {
				t.Errorf("%d: error did not match %s: %v", i, test.errMsg, pErr)
			}
			defer teardownHeartbeats(ts)
			ts.Lock()
			defer ts.Unlock()
			if len(ts.txns) != 1 {
				t.Errorf("%d: expected transaction to be tracked", i)
			}
		}()
	}
}
Example 9: Send
func (ts *txnSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	// Send call through wrapped sender.
	ba.Txn = &ts.Proto
	if ts.UserPriority > 0 {
		ba.UserPriority = ts.UserPriority
	}
	ctx = opentracing.ContextWithSpan(ctx, ts.Trace)
	ba.SetNewRequest()
	br, pErr := ts.wrapped.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(ts.wrapped, br))
	}
	if br != nil {
		for _, encSp := range br.CollectedSpans {
			var newSp basictracer.RawSpan
			if err := tracing.DecodeRawSpan(encSp, &newSp); err != nil {
				return nil, roachpb.NewError(err)
			}
			ts.CollectedSpans = append(ts.CollectedSpans, newSp)
		}
	}
	// Only successful requests can carry an updated Txn in their response
	// header. Any error (e.g. a restart) can have a Txn attached to them as
	// well; those update our local state in the same way for the next attempt.
	// The exception is if our transaction was aborted and needs to restart
	// from scratch, in which case we do just that.
	if pErr == nil {
		ts.Proto.Update(br.Txn)
		return br, nil
	} else if _, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); ok {
		// On Abort, reset the transaction so we start anew on restart.
		ts.Proto = roachpb.Transaction{
			TxnMeta: roachpb.TxnMeta{
				Isolation: ts.Proto.Isolation,
			},
			Name: ts.Proto.Name,
		}
		// Acts as a minimum priority on restart.
		if pErr.GetTxn() != nil {
			ts.Proto.Priority = pErr.GetTxn().Priority
		}
	} else if pErr.TransactionRestart != roachpb.TransactionRestart_ABORT {
		ts.Proto.Update(pErr.GetTxn())
	}
	return nil, pErr
}
Example 10: Send
// Send implements the client.Sender interface. The store is looked up from the
// store map if specified by the request; otherwise, the command is being
// executed locally, and the replica is determined via lookup through each
// store's LookupRange method. The latter path is taken only by unit tests.
func (ls *Stores) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	sp := tracing.SpanFromContext(ctx)
	var store *Store
	var pErr *roachpb.Error
	// If we aren't given a Replica, then a little bending over
	// backwards here. This case applies exclusively to unittests.
	if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
		var repl *roachpb.ReplicaDescriptor
		var rangeID roachpb.RangeID
		rs := keys.Range(ba)
		rangeID, repl, pErr = ls.lookupReplica(rs.Key, rs.EndKey)
		if pErr == nil {
			ba.RangeID = rangeID
			ba.Replica = *repl
		}
	}
	ctx = log.Add(ctx,
		log.RangeID, ba.RangeID)
	if pErr == nil {
		store, pErr = ls.GetStore(ba.Replica.StoreID)
	}
	var br *roachpb.BatchResponse
	if pErr != nil {
		return nil, pErr
	}
	// For calls that read data within a txn, we can avoid uncertainty
	// related retries in certain situations. If the node is in
	// "CertainNodes", we need not worry about uncertain reads any
	// more. Setting MaxTimestamp=Timestamp for the operation
	// accomplishes that. See roachpb.Transaction.CertainNodes for details.
	if ba.Txn != nil && ba.Txn.CertainNodes.Contains(ba.Replica.NodeID) {
		// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
		sp.LogEvent("read has no clock uncertainty")
		// Copy-on-write to protect others we might be sharing the Txn with.
		shallowTxn := *ba.Txn
		shallowTxn.MaxTimestamp = ba.Txn.Timestamp
		ba.Txn = &shallowTxn
	}
	br, pErr = store.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(store, br))
	}
	return br, pErr
}
Example 11: maybeBeginTxn
// maybeBeginTxn begins a new transaction if a txn has been specified
// in the request but has a nil ID. The new transaction is initialized
// using the name and isolation in the otherwise uninitialized txn.
// The Priority, if non-zero, is used as a minimum.
//
// No transactional writes are allowed unless preceded by a begin
// transaction request within the same batch. The exception is if the
// transaction is already in state txn.Writing=true.
func (tc *TxnCoordSender) maybeBeginTxn(ba *roachpb.BatchRequest) error {
	if ba.Txn == nil {
		return nil
	}
	if len(ba.Requests) == 0 {
		return util.Errorf("empty batch with txn")
	}
	if ba.Txn.ID == nil {
		// Create transaction without a key. The key is set when a begin
		// transaction request is received.
		// The initial timestamp may be communicated by a higher layer.
		// If so, use that. Otherwise make up a new one.
		timestamp := ba.Txn.OrigTimestamp
		if timestamp == roachpb.ZeroTimestamp {
			timestamp = tc.clock.Now()
		}
		newTxn := roachpb.NewTransaction(ba.Txn.Name, nil, ba.UserPriority,
			ba.Txn.Isolation, timestamp, tc.clock.MaxOffset().Nanoseconds())
		// Use existing priority as a minimum. This is used on transaction
		// aborts to ratchet priority when creating successor transaction.
		if newTxn.Priority < ba.Txn.Priority {
			newTxn.Priority = ba.Txn.Priority
		}
		ba.Txn = newTxn
	}
	// Check for a begin transaction to set txn key based on the key of
	// the first transactional write. Also enforce that no transactional
	// writes occur before a begin transaction.
	var haveBeginTxn bool
	for _, req := range ba.Requests {
		args := req.GetInner()
		if bt, ok := args.(*roachpb.BeginTransactionRequest); ok {
			if haveBeginTxn || ba.Txn.Writing {
				return util.Errorf("begin transaction requested twice in the same transaction: %s", ba.Txn)
			}
			haveBeginTxn = true
			if ba.Txn.Key == nil {
				ba.Txn.Key = bt.Key
			}
		}
		if roachpb.IsTransactionWrite(args) && !haveBeginTxn && !ba.Txn.Writing {
			return util.Errorf("transactional write before begin transaction")
		}
	}
	return nil
}
Example 12: clientHasAbandoned
func (tc *TxnCoordSender) clientHasAbandoned(txnID uuid.UUID) {
	tc.Lock()
	txnMeta := tc.txns[txnID]
	var intentSpans []roachpb.Span
	// TODO(tschottdorf): should we be more proactive here?
	// The client might be continuing the transaction
	// through another coordinator, but in the most likely
	// case it's just gone and the open transaction record
	// could block concurrent operations.
	if log.V(1) {
		log.Infof("transaction %s abandoned; stopping heartbeat", txnMeta.txn)
	}
	// Grab the intents here to avoid potential race.
	intentSpans = collectIntentSpans(txnMeta.keys)
	txnMeta.keys.Clear()
	// txnMeta.txn is possibly replaced concurrently,
	// so grab a copy before unlocking.
	txn := txnMeta.txn.Clone()
	tc.Unlock()
	ba := roachpb.BatchRequest{}
	ba.Txn = &txn
	// Actively abort the transaction and its intents since we assume it's abandoned.
	et := &roachpb.EndTransactionRequest{
		Span: roachpb.Span{
			Key: txn.Key,
		},
		Commit:      false,
		IntentSpans: intentSpans,
	}
	ba.Add(et)
	tc.stopper.RunAsyncTask(func() {
		// Use the wrapped sender since the normal Sender
		// does not allow clients to specify intents.
		// TODO(tschottdorf): not using the existing context here since that
		// leads to use-after-finish of the contained trace. Should fork off
		// before the goroutine.
		if _, pErr := tc.wrapped.Send(context.Background(), ba); pErr != nil {
			if log.V(1) {
				log.Warningf("abort due to inactivity failed for %s: %s ", txn, pErr)
			}
		}
	})
}
Example 13: tryAsyncAbort
// tryAsyncAbort (synchronously) grabs a copy of the txn proto and the intents
// (which it then clears from txnMeta), and asynchronously tries to abort the
// transaction.
func (tc *TxnCoordSender) tryAsyncAbort(txnID uuid.UUID) {
	tc.Lock()
	txnMeta := tc.txns[txnID]
	// Clone the intents and the txn to avoid data races.
	txnMeta.keys = append([]roachpb.Span(nil), txnMeta.keys...)
	roachpb.MergeSpans(&txnMeta.keys)
	intentSpans := txnMeta.keys
	txnMeta.keys = nil
	txn := txnMeta.txn.Clone()
	tc.Unlock()
	// Since we don't hold the lock continuously, it's possible that two aborts
	// raced here. That's fine (and probably better than the alternative, which
	// is missing new intents sometimes).
	if txn.Status != roachpb.PENDING {
		return
	}
	ba := roachpb.BatchRequest{}
	ba.Txn = &txn
	et := &roachpb.EndTransactionRequest{
		Span: roachpb.Span{
			Key: txn.Key,
		},
		Commit:      false,
		IntentSpans: intentSpans,
	}
	ba.Add(et)
	if err := tc.stopper.RunAsyncTask(func() {
		// Use the wrapped sender since the normal Sender does not allow
		// clients to specify intents.
		// TODO(tschottdorf): not using the existing context here since that
		// leads to use-after-finish of the contained trace. Should fork off
		// before the goroutine.
		if _, pErr := tc.wrapped.Send(context.Background(), ba); pErr != nil {
			if log.V(1) {
				log.Warningf("abort due to inactivity failed for %s: %s ", txn, pErr)
			}
		}
	}); err != nil {
		log.Warning(err)
	}
}
Example 14: heartbeat
func (tc *TxnCoordSender) heartbeat(ctx context.Context, txnID uuid.UUID) bool {
	tc.Lock()
	txnMeta := tc.txns[txnID]
	txn := txnMeta.txn.Clone()
	tc.Unlock()
	// Before we send a heartbeat, determine whether this transaction should be
	// considered abandoned. If so, exit heartbeat. If ctx.Done() is not nil, then
	// it is a cancellable Context and we skip this check and use the ctx lifetime
	// instead of a timeout.
	if ctx.Done() == nil && txnMeta.hasClientAbandonedCoord(tc.clock.PhysicalNow()) {
		tc.clientHasAbandoned(txnID)
		return false
	}
	ba := roachpb.BatchRequest{}
	ba.Txn = &txn
	hb := &roachpb.HeartbeatTxnRequest{
		Now: tc.clock.Now(),
	}
	hb.Key = txn.Key
	ba.Add(hb)
	log.Trace(ctx, "heartbeat")
	_, err := tc.wrapped.Send(ctx, ba)
	// If the transaction is not in pending state, then we can stop
	// the heartbeat. It's either aborted or committed, and we resolve
	// write intents accordingly.
	if err != nil {
		log.Warningf("heartbeat to %s failed: %s", txn, err)
	}
	// TODO(bdarnell): once we have gotten a heartbeat response with
	// Status != PENDING, future heartbeats are useless. However, we
	// need to continue the heartbeatLoop until the client either
	// commits or abandons the transaction. We could save a little
	// pointless work by restructuring this loop to stop sending
	// heartbeats between the time that the transaction is aborted and
	// the client finds out. Furthermore, we could use this information
	// to send TransactionAbortedErrors to the client so it can restart
	// immediately instead of running until its EndTransaction.
	return true
}
Example 15: maybeBeginTxn
// maybeBeginTxn begins a new transaction if a txn has been specified
// in the request but has a nil ID. The new transaction is initialized
// using the name and isolation in the otherwise uninitialized txn.
// The Priority, if non-zero, is used as a minimum.
func (tc *TxnCoordSender) maybeBeginTxn(ba *roachpb.BatchRequest) {
	if ba.Txn == nil {
		return
	}
	if len(ba.Requests) == 0 {
		panic("empty batch with txn")
	}
	if len(ba.Txn.ID) == 0 {
		// TODO(tschottdorf): should really choose the first txn write here.
		firstKey := ba.Requests[0].GetInner().Header().Key
		newTxn := roachpb.NewTransaction(ba.Txn.Name, keys.KeyAddress(firstKey), ba.GetUserPriority(),
			ba.Txn.Isolation, tc.clock.Now(), tc.clock.MaxOffset().Nanoseconds())
		// Use existing priority as a minimum. This is used on transaction
		// aborts to ratchet priority when creating successor transaction.
		if newTxn.Priority < ba.Txn.Priority {
			newTxn.Priority = ba.Txn.Priority
		}
		ba.Txn = newTxn
	}
}