This article collects typical usage examples of the Golang method github.com/cockroachdb/cockroach/roachpb.BatchRequest.GetArg. If you have been wondering what BatchRequest.GetArg does, and how and when to use it, the curated example code below should help. You may also want to read further about the enclosing type, github.com/cockroachdb/cockroach/roachpb.BatchRequest.
Nine code examples of BatchRequest.GetArg are shown below, ordered by popularity by default.
Example 1: Send
// Send forwards the call to the single store. This is a poor man's
// version of kv.TxnCoordSender, but it serves the purposes of
// supporting tests in this package. Transactions are not supported.
// Since kv/ depends on storage/, we can't get access to a
// TxnCoordSender from here.
// TODO(tschottdorf): {kv->storage}.LocalSender
func (db *testSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	if et, ok := ba.GetArg(roachpb.EndTransaction); ok {
		return nil, roachpb.NewError(util.Errorf("%s method not supported", et.Method()))
	}
	// Lookup range and direct request.
	key, endKey := keys.Range(ba)
	rng := db.store.LookupReplica(key, endKey)
	if rng == nil {
		return nil, roachpb.NewError(roachpb.NewRangeKeyMismatchError(key, endKey, nil))
	}
	ba.RangeID = rng.Desc().RangeID
	replica := rng.GetReplica()
	if replica == nil {
		return nil, roachpb.NewError(util.Errorf("own replica missing in range"))
	}
	ba.Replica = *replica
	br, pErr := db.store.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(db.store, br))
	}
	if pErr != nil {
		return nil, pErr
	}
	return br, nil
}
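The pattern at the top of this example — probing a batch for a request of a given method — is all there is to BatchRequest.GetArg. Here is a minimal, hedged sketch of that idiom in isolation; the batch construction is illustrative, and the exact Add/NewGet signatures should be treated as assumptions:

var ba roachpb.BatchRequest
ba.Add(roachpb.NewGet(roachpb.Key("a")))
ba.Add(&roachpb.EndTransactionRequest{Commit: true})
// GetArg scans the batch for the given method; like a map lookup, it
// returns the matching request and whether one was found.
if rArgs, ok := ba.GetArg(roachpb.EndTransaction); ok {
	et := rArgs.(*roachpb.EndTransactionRequest)
	_ = et // e.g. reject it, or attach intent spans, as the examples below do
}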
Example 2: updateState
//......... (part of the code omitted here) .........
		t.Txn = *newTxn
		// Clean up the freshly aborted transaction in defer(), avoiding a
		// race with the state update below.
		defer tc.cleanupTxn(trace, t.Txn)
	case *roachpb.TransactionPushError:
		newTxn.Update(t.Txn)
		// Increase timestamp if applicable, ensuring that we're
		// just ahead of the pushee.
		newTxn.Timestamp.Forward(t.PusheeTxn.Timestamp.Add(0, 1))
		newTxn.Restart(ba.GetUserPriority(), t.PusheeTxn.Priority-1, newTxn.Timestamp)
		t.Txn = newTxn
	case *roachpb.TransactionRetryError:
		newTxn.Update(&t.Txn)
		newTxn.Restart(ba.GetUserPriority(), t.Txn.Priority, newTxn.Timestamp)
		t.Txn = *newTxn
	case roachpb.TransactionRestartError:
		// Assertion: The above cases should exhaust all ErrorDetails which
		// carry a Transaction.
		if pErr.Detail != nil {
			panic(fmt.Sprintf("unhandled TransactionRestartError %T", err))
		}
	default:
		trace.SetError()
	}
	return func() *roachpb.Error {
		if len(newTxn.ID) <= 0 {
			return pErr
		}
		id := string(newTxn.ID)
		tc.Lock()
		defer tc.Unlock()
		txnMeta := tc.txns[id]
		// For successful transactional requests, keep the written intents and
		// the updated transaction record to be sent along with the reply.
		// The transaction metadata is created with the first writing operation.
		// A tricky edge case is that of a transaction which "fails" on the
		// first writing request, but actually manages to write some intents
		// (for example, due to being multi-range). In this case, there will
		// be an error, but the transaction will be marked as Writing and the
		// coordinator must track the state, for the client's retry will be
		// performed with a Writing transaction which the coordinator rejects
		// unless it is tracking it (on top of it making sense to track it;
		// after all, it **has** laid down intents and only the coordinator
		// can augment a potential EndTransaction call).
		// TODO(tschottdorf): already computed the intents prior to sending,
		// consider re-using those.
		if intents := ba.GetIntents(); len(intents) > 0 && (err == nil || newTxn.Writing) {
			if txnMeta == nil {
				if !newTxn.Writing {
					panic("txn with intents marked as non-writing")
				}
				txnMeta = &txnMetadata{
					txn:              *newTxn,
					keys:             cache.NewIntervalCache(cache.Config{Policy: cache.CacheNone}),
					firstUpdateNanos: tc.clock.PhysicalNow(),
					lastUpdateNanos:  tc.clock.PhysicalNow(),
					timeoutDuration:  tc.clientTimeout,
					txnEnd:           make(chan struct{}),
				}
				tc.txns[id] = txnMeta
				// If the transaction is already over, there's no point in
				// launching a one-off coordinator which will shut down right
				// away. If we ended up here with an error, we'll always start
				// the coordinator - the transaction has laid down intents, so
				// we expect it to be committed/aborted at some point in the
				// future.
				if _, isEnding := ba.GetArg(roachpb.EndTransaction); err != nil || !isEnding {
					trace.Event("coordinator spawns")
					if !tc.stopper.RunAsyncTask(func() {
						tc.heartbeatLoop(id)
					}) {
						// The system is already draining and we can't start the
						// heartbeat. We refuse new transactions for now because
						// they're likely not going to have all intents committed.
						// In principle, we can relax this as needed though.
						tc.unregisterTxnLocked(id)
						return roachpb.NewError(&roachpb.NodeUnavailableError{})
					}
				}
			}
			for _, intent := range intents {
				txnMeta.addKeyRange(intent.Key, intent.EndKey)
			}
		}
		// Update our record of this transaction, even on error.
		if txnMeta != nil {
			txnMeta.txn = *newTxn
			if !txnMeta.txn.Writing {
				panic("tracking a non-writing txn")
			}
			txnMeta.setLastUpdate(tc.clock.PhysicalNow())
		}
		if err == nil {
			// For successful transactional requests, always send the updated txn
			// record back.
			br.Txn = newTxn
		}
		return pErr
	}()
}
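The heartbeatLoop spawned above is not shown on this page. As context, here is a simplified, hedged sketch of what such a loop might look like; the ticker interval and the actual HeartbeatTxn request of the real coordinator are assumptions, and only the txnEnd/stopper interplay mirrors the example:

// Hypothetical sketch, not the actual TxnCoordSender.heartbeatLoop.
func heartbeatSketch(beat func(), txnEnd, draining <-chan struct{}) {
	ticker := time.NewTicker(time.Second) // interval is an assumption
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			beat() // would send a HeartbeatTxn request for the txn record
		case <-txnEnd: // closed when the transaction finishes
			return
		case <-draining: // the stopper is shutting down
			return
		}
	}
}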
Example 3: Send
// Send implements the batch.Sender interface. If the request is part of a
// transaction, the TxnCoordSender adds the transaction to a map of active
// transactions and begins heartbeating it. Every subsequent request for the
// same transaction updates the lastUpdate timestamp to prevent live
// transactions from being considered abandoned and garbage collected.
// Read/write mutating requests have their key or key range added to the
// transaction's interval tree of key ranges for eventual cleanup via resolved
// write intents; they're tagged to an outgoing EndTransaction request, with
// the receiving replica in charge of resolving them.
func (tc *TxnCoordSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	if err := tc.maybeBeginTxn(&ba); err != nil {
		return nil, roachpb.NewError(err)
	}
	ba.CmdID = ba.GetOrCreateCmdID(tc.clock.PhysicalNow())
	var startNS int64
	// This is the earliest point at which the request has a ClientCmdID and/or
	// TxnID (if applicable). Begin a Trace which follows this request.
	trace := tc.tracer.NewTrace(tracer.Coord, &ba)
	defer trace.Finalize()
	defer trace.Epoch("sending batch")()
	ctx = tracer.ToCtx(ctx, trace)
	var id string // optional transaction ID
	if ba.Txn != nil {
		// If this request is part of a transaction...
		id = string(ba.Txn.ID)
		// Verify that if this Transaction is not read-only, we have it on
		// file. If not, refuse writes - the client must have issued a write on
		// another coordinator previously.
		if ba.Txn.Writing && ba.IsTransactionWrite() {
			tc.Lock()
			_, ok := tc.txns[id]
			tc.Unlock()
			if !ok {
				return nil, roachpb.NewError(util.Errorf("transaction must not write on multiple coordinators"))
			}
		}
		// Set the timestamp to the original timestamp for read-only
		// commands and to the transaction timestamp for read/write
		// commands.
		if ba.IsReadOnly() {
			ba.Timestamp = ba.Txn.OrigTimestamp
		} else {
			ba.Timestamp = ba.Txn.Timestamp
		}
		if rArgs, ok := ba.GetArg(roachpb.EndTransaction); ok {
			et := rArgs.(*roachpb.EndTransactionRequest)
			if len(et.Key) != 0 {
				return nil, roachpb.NewError(util.Errorf("EndTransaction must not have a Key set"))
			}
			et.Key = ba.Txn.Key
			// Remember when EndTransaction started in case we want to
			// be linearizable.
			startNS = tc.clock.PhysicalNow()
			if len(et.Intents) > 0 {
				// TODO(tschottdorf): it may be useful to allow this later.
				// That would be part of a possible plan to allow txns which
				// write on multiple coordinators.
				return nil, roachpb.NewError(util.Errorf("client must not pass intents to EndTransaction"))
			}
			tc.Lock()
			txnMeta, metaOK := tc.txns[id]
			if id != "" && metaOK {
				et.Intents = txnMeta.intents()
			}
			tc.Unlock()
			if intents := ba.GetIntents(); len(intents) > 0 {
				// Writes in Batch, so EndTransaction is fine. Should add
				// outstanding intents to EndTransaction, though.
				// TODO(tschottdorf): possible issues when the batch fails,
				// but the intents have been added anyways.
				// TODO(tschottdorf): some of these intents may be covered
				// by others, for example {[a,b), a}). This can lead to
				// some extra requests when those are non-local to the txn
				// record. But it doesn't seem worth optimizing now.
				et.Intents = append(et.Intents, intents...)
			} else if !metaOK {
				// If we don't have the transaction, then this must be a retry
				// by the client. We can no longer reconstruct a correct
				// request so we must fail.
				//
				// TODO(bdarnell): if we had a GetTransactionStatus API then
				// we could lookup the transaction and return either nil or
				// TransactionAbortedError instead of this ambivalent error.
				return nil, roachpb.NewError(util.Errorf("transaction is already committed or aborted"))
			}
			if len(et.Intents) == 0 {
				// If there aren't any intents, then there's factually no
				// transaction to end. Read-only txns have all of their state in
				// the client.
				return nil, roachpb.NewError(util.Errorf("cannot commit a read-only transaction"))
			}
			if log.V(1) {
				for _, intent := range et.Intents {
					trace.Event(fmt.Sprintf("intent: [%s,%s)", intent.Key, intent.EndKey))
				}
//......... (part of the code omitted here) .........
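Why does this example record startNS when it sees an EndTransaction? For linearizability: below is a hedged sketch of the wait a coordinator of this era could perform after a successful commit, assuming a linearizable flag on the coordinator and the hlc clock's MaxOffset/PhysicalNow methods; it is not the page's omitted code.

// Delay returning a committed EndTransaction until the cluster's maximum
// clock offset has elapsed since startNS, so that no other node could
// observe the commit in its "past".
sleepNS := tc.clock.MaxOffset().Nanoseconds() - (tc.clock.PhysicalNow() - startNS)
if tc.linearizable && sleepNS > 0 {
	defer time.Sleep(time.Duration(sleepNS))
}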
Example 4: updateState
//......... (part of the code omitted here) .........
	if pErr != nil && pErr.GetTxn() != nil {
		// Avoid changing existing errors because sometimes they escape into
		// goroutines and then there are races. Fairly sure there isn't one
		// here, but better safe than sorry.
		pErrShallow := *pErr
		pErrShallow.SetTxn(newTxn)
		pErr = &pErrShallow
	}
	if newTxn.ID == nil {
		return pErr
	}
	txnID := *newTxn.ID
	tc.Lock()
	defer tc.Unlock()
	txnMeta := tc.txns[txnID]
	// For successful transactional requests, keep the written intents and
	// the updated transaction record to be sent along with the reply.
	// The transaction metadata is created with the first writing operation.
	// A tricky edge case is that of a transaction which "fails" on the
	// first writing request, but actually manages to write some intents
	// (for example, due to being multi-range). In this case, there will
	// be an error, but the transaction will be marked as Writing and the
	// coordinator must track the state, for the client's retry will be
	// performed with a Writing transaction which the coordinator rejects
	// unless it is tracking it (on top of it making sense to track it;
	// after all, it **has** laid down intents and only the coordinator
	// can augment a potential EndTransaction call). See #3303.
	var intentGroup interval.RangeGroup
	if txnMeta != nil {
		intentGroup = txnMeta.keys
	} else if pErr == nil || newTxn.Writing {
		intentGroup = interval.NewRangeTree()
	}
	if intentGroup != nil {
		// Adding the intents even on error reduces the likelihood of dangling
		// intents blocking concurrent writers for extended periods of time.
		// See #3346.
		ba.IntentSpanIterate(func(key, endKey roachpb.Key) {
			addKeyRange(intentGroup, key, endKey)
		})
		if txnMeta == nil && intentGroup.Len() > 0 {
			if !newTxn.Writing {
				panic("txn with intents marked as non-writing")
			}
			// If the transaction is already over, there's no point in
			// launching a one-off coordinator which will shut down right
			// away. If we ended up here with an error, we'll always start
			// the coordinator - the transaction has laid down intents, so
			// we expect it to be committed/aborted at some point in the
			// future.
			if _, isEnding := ba.GetArg(roachpb.EndTransaction); pErr != nil || !isEnding {
				log.Trace(ctx, "coordinator spawns")
				txnMeta = &txnMetadata{
					txn:              *newTxn,
					keys:             intentGroup,
					firstUpdateNanos: startNS,
					lastUpdateNanos:  tc.clock.PhysicalNow(),
					timeoutDuration:  tc.clientTimeout,
					txnEnd:           make(chan struct{}),
				}
				tc.txns[txnID] = txnMeta
				if !tc.stopper.RunAsyncTask(func() {
					tc.heartbeatLoop(ctx, txnID)
				}) {
					// The system is already draining and we can't start the
					// heartbeat. We refuse new transactions for now because
					// they're likely not going to have all intents committed.
					// In principle, we can relax this as needed though.
					tc.unregisterTxnLocked(txnID)
					return roachpb.NewError(&roachpb.NodeUnavailableError{})
				}
			} else {
				// If this was a successful one phase commit, update stats
				// directly as they won't otherwise be updated on heartbeat
				// loop shutdown.
				etArgs, ok := br.Responses[len(br.Responses)-1].GetInner().(*roachpb.EndTransactionResponse)
				tc.updateStats(tc.clock.PhysicalNow()-startNS, 0, newTxn.Status, ok && etArgs.OnePhaseCommit)
			}
		}
	}
	// Update our record of this transaction, even on error.
	if txnMeta != nil {
		txnMeta.txn = *newTxn
		if !txnMeta.txn.Writing {
			panic("tracking a non-writing txn")
		}
		txnMeta.setLastUpdate(tc.clock.PhysicalNow())
	}
	if pErr == nil {
		// For successful transactional requests, always send the updated txn
		// record back.
		br.Txn = newTxn
	}
	return pErr
}
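The deduplication above is delegated to addKeyRange and interval.RangeGroup, neither of which appears on this page. As a self-contained stand-in, here is a hedged sketch of the same effect using a sort-and-sweep over roachpb.Span values; the real RangeGroup is interval-tree based, and mergeSpans, including its Key.Next() normalization of point intents, is hypothetical:

import (
	"bytes"
	"sort"
)

// mergeSpans folds overlapping or touching [Key, EndKey) spans together,
// so an EndTransaction would carry a minimal set of intent spans.
func mergeSpans(spans []roachpb.Span) []roachpb.Span {
	sort.Slice(spans, func(i, j int) bool {
		return bytes.Compare(spans[i].Key, spans[j].Key) < 0
	})
	var out []roachpb.Span
	for _, s := range spans {
		if len(s.EndKey) == 0 {
			s.EndKey = s.Key.Next() // treat a point intent as a one-key span
		}
		if n := len(out); n > 0 && bytes.Compare(s.Key, out[n-1].EndKey) <= 0 {
			if bytes.Compare(s.EndKey, out[n-1].EndKey) > 0 {
				out[n-1].EndKey = s.EndKey // extend the previous span
			}
			continue
		}
		out = append(out, s)
	}
	return out
}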
Example 5: Send
// Send implements the batch.Sender interface. If the request is part of a
// transaction, the TxnCoordSender adds the transaction to a map of active
// transactions and begins heartbeating it. Every subsequent request for the
// same transaction updates the lastUpdate timestamp to prevent live
// transactions from being considered abandoned and garbage collected.
// Read/write mutating requests have their key or key range added to the
// transaction's interval tree of key ranges for eventual cleanup via resolved
// write intents; they're tagged to an outgoing EndTransaction request, with
// the receiving replica in charge of resolving them.
func (tc *TxnCoordSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	{
		// Start new or pick up active trace and embed its trace metadata into
		// header for use by RPC recipients. From here on, there's always an active
		// Trace, though its overhead is small unless it's sampled.
		sp := opentracing.SpanFromContext(ctx)
		if sp == nil {
			sp = tc.tracer.StartSpan(opTxnCoordSender)
			defer sp.Finish()
			ctx = opentracing.ContextWithSpan(ctx, sp)
		}
		// TODO(tschottdorf): To get rid of the spurious alloc below we need to
		// implement the carrier interface on ba.Header or make Span non-nullable,
		// both of which force all of ba on the Heap. It's already there, so may
		// not be a big deal, but ba should live on the stack. Also not easy to use
		// a buffer pool here since anything that goes into the RPC layer could be
		// used by goroutines we didn't wait for.
		if ba.Header.Trace == nil {
			ba.Header.Trace = &tracing.Span{}
		}
		if err := tc.tracer.Inject(sp, basictracer.Delegator, ba.Trace); err != nil {
			return nil, roachpb.NewError(err)
		}
	}
	startNS := tc.clock.PhysicalNow()
	if ba.Txn != nil {
		// If this request is part of a transaction...
		if err := tc.maybeBeginTxn(&ba); err != nil {
			return nil, roachpb.NewError(err)
		}
		txnID := *ba.Txn.ID
		// Verify that if this Transaction is not read-only, we have it on file.
		// If not, refuse further operations - the transaction was aborted due
		// to a timeout or the client must have issued a write on another
		// coordinator previously.
		if ba.Txn.Writing {
			tc.Lock()
			_, ok := tc.txns[txnID]
			tc.Unlock()
			if !ok {
				pErr := roachpb.NewErrorf("writing transaction timed out, was aborted, " +
					"or ran on multiple coordinators")
				return nil, pErr
			}
		}
		if rArgs, ok := ba.GetArg(roachpb.EndTransaction); ok {
			et := rArgs.(*roachpb.EndTransactionRequest)
			if len(et.Key) != 0 {
				return nil, roachpb.NewErrorf("EndTransaction must not have a Key set")
			}
			et.Key = ba.Txn.Key
			if len(et.IntentSpans) > 0 {
				// TODO(tschottdorf): it may be useful to allow this later.
				// That would be part of a possible plan to allow txns which
				// write on multiple coordinators.
				return nil, roachpb.NewErrorf("client must not pass intents to EndTransaction")
			}
			tc.Lock()
			txnMeta, metaOK := tc.txns[txnID]
			{
				// Populate et.IntentSpans, taking into account both existing
				// writes (if any) and new writes in this batch, and taking
				// care to perform proper deduplication.
				var keys interval.RangeGroup
				if metaOK {
					keys = txnMeta.keys
				} else {
					keys = interval.NewRangeTree()
				}
				ba.IntentSpanIterate(func(key, endKey roachpb.Key) {
					addKeyRange(keys, key, endKey)
				})
				et.IntentSpans = collectIntentSpans(keys)
			}
			tc.Unlock()
			if len(et.IntentSpans) > 0 {
				// All good, proceed.
			} else if !metaOK {
				// If we don't have the transaction, then this must be a retry
				// by the client. We can no longer reconstruct a correct
				// request so we must fail.
				//
				// TODO(bdarnell): if we had a GetTransactionStatus API then
				// we could lookup the transaction and return either nil or
				// TransactionAbortedError instead of this ambivalent error.
				return nil, roachpb.NewErrorf("transaction is already committed or aborted")
			}
//......... (part of the code omitted here) .........
Example 6: Send
// Send implements the batch.Sender interface. If the request is part of a
// transaction, the TxnCoordSender adds the transaction to a map of active
// transactions and begins heartbeating it. Every subsequent request for the
// same transaction updates the lastUpdate timestamp to prevent live
// transactions from being considered abandoned and garbage collected.
// Read/write mutating requests have their key or key range added to the
// transaction's interval tree of key ranges for eventual cleanup via resolved
// write intents; they're tagged to an outgoing EndTransaction request, with
// the receiving replica in charge of resolving them.
func (tc *TxnCoordSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	{
		// Start new or pick up active trace and embed its trace metadata into
		// header for use by RPC recipients. From here on, there's always an active
		// Trace, though its overhead is small unless it's sampled.
		sp := opentracing.SpanFromContext(ctx)
		// TODO(radu): once contexts are plumbed correctly, we should use the Tracer
		// from ctx.
		tracer := tracing.TracerFromCtx(tc.ctx)
		if sp == nil {
			sp = tracer.StartSpan(opTxnCoordSender)
			defer sp.Finish()
			ctx = opentracing.ContextWithSpan(ctx, sp)
		}
		// TODO(tschottdorf): To get rid of the spurious alloc below we need to
		// implement the carrier interface on ba.Header or make Span non-nullable,
		// both of which force all of ba on the Heap. It's already there, so may
		// not be a big deal, but ba should live on the stack. Also not easy to use
		// a buffer pool here since anything that goes into the RPC layer could be
		// used by goroutines we didn't wait for.
		if ba.Header.Trace == nil {
			ba.Header.Trace = &tracing.Span{}
		} else {
			// We didn't make this object but are about to mutate it, so we
			// have to take a copy - the original might already have been
			// passed to the RPC layer.
			ba.Header.Trace = protoutil.Clone(ba.Header.Trace).(*tracing.Span)
		}
		if err := tracer.Inject(sp.Context(), basictracer.Delegator, ba.Trace); err != nil {
			return nil, roachpb.NewError(err)
		}
	}
	startNS := tc.clock.PhysicalNow()
	if ba.Txn != nil {
		// If this request is part of a transaction...
		if err := tc.maybeBeginTxn(&ba); err != nil {
			return nil, roachpb.NewError(err)
		}
		var et *roachpb.EndTransactionRequest
		var hasET bool
		{
			var rArgs roachpb.Request
			rArgs, hasET = ba.GetArg(roachpb.EndTransaction)
			if hasET {
				et = rArgs.(*roachpb.EndTransactionRequest)
				if len(et.Key) != 0 {
					return nil, roachpb.NewErrorf("EndTransaction must not have a Key set")
				}
				et.Key = ba.Txn.Key
				if len(et.IntentSpans) > 0 {
					// TODO(tschottdorf): it may be useful to allow this later.
					// That would be part of a possible plan to allow txns which
					// write on multiple coordinators.
					return nil, roachpb.NewErrorf("client must not pass intents to EndTransaction")
				}
			}
		}
		if pErr := func() *roachpb.Error {
			tc.Lock()
			defer tc.Unlock()
			if pErr := tc.maybeRejectClientLocked(ctx, *ba.Txn); pErr != nil {
				return pErr
			}
			if !hasET {
				return nil
			}
			// Everything below is carried out only when trying to commit.
			// Populate et.IntentSpans, taking into account both any existing
			// and new writes, and taking care to perform proper deduplication.
			txnMeta := tc.txns[*ba.Txn.ID]
			distinctSpans := true
			if txnMeta != nil {
				et.IntentSpans = txnMeta.keys
				// Defensively set distinctSpans to false if we had any previous
				// requests in this transaction. This effectively limits the distinct
				// spans optimization to 1pc transactions.
				distinctSpans = len(txnMeta.keys) == 0
			}
			ba.IntentSpanIterate(func(key, endKey roachpb.Key) {
				et.IntentSpans = append(et.IntentSpans, roachpb.Span{
					Key:    key,
					EndKey: endKey,
				})
			})
			// TODO(peter): Populate DistinctSpans on all batches, not just batches
			// which contain an EndTransactionRequest.
//......... (part of the code omitted here) .........
Example 7: updateState
// updateState updates the transaction state in both the success and
// error cases, applying those updates to the corresponding txnMeta
// object when adequate. It also updates certain errors with the
// updated transaction for use by client restarts.
func (tc *TxnCoordSender) updateState(ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse, pErr *roachpb.Error) *roachpb.Error {
	trace := tracer.FromCtx(ctx)
	newTxn := &roachpb.Transaction{}
	newTxn.Update(ba.GetTxn())
	err := pErr.GoError()
	switch t := err.(type) {
	case nil:
		newTxn.Update(br.GetTxn())
		// Move txn timestamp forward to response timestamp if applicable.
		// TODO(tschottdorf): see (*Replica).executeBatch and comments within.
		// Looks like this isn't necessary any more, nor did it prevent a bug
		// referenced in a TODO there.
		newTxn.Timestamp.Forward(br.Timestamp)
	case *roachpb.TransactionStatusError:
		// Likely already committed or more obscure errors such as epoch or
		// timestamp regressions; consider txn dead.
		defer tc.cleanupTxn(trace, t.Txn)
	case *roachpb.OpRequiresTxnError:
		// TODO(tschottdorf): range-spanning autowrap currently broken.
		panic("TODO(tschottdorf): disabled")
	case *roachpb.ReadWithinUncertaintyIntervalError:
		// Mark the host as certain. See the protobuf comment for
		// Transaction.CertainNodes for details.
		if t.NodeID == 0 {
			panic("no replica set in header on uncertainty restart")
		}
		newTxn.CertainNodes.Add(t.NodeID)
		// If the reader encountered a newer write within the uncertainty
		// interval, move the timestamp forward, just past that write or
		// up to MaxTimestamp, whichever comes first.
		candidateTS := newTxn.MaxTimestamp
		candidateTS.Backward(t.ExistingTimestamp.Add(0, 1))
		newTxn.Timestamp.Forward(candidateTS)
		newTxn.Restart(ba.GetUserPriority(), newTxn.Priority, newTxn.Timestamp)
		t.Txn = *newTxn
	case *roachpb.TransactionAbortedError:
		// Increase timestamp if applicable.
		newTxn.Timestamp.Forward(t.Txn.Timestamp)
		newTxn.Priority = t.Txn.Priority
		t.Txn = *newTxn
		// Clean up the freshly aborted transaction in defer(), avoiding a
		// race with the state update below.
		defer tc.cleanupTxn(trace, t.Txn)
	case *roachpb.TransactionPushError:
		// Increase timestamp if applicable, ensuring that we're
		// just ahead of the pushee.
		newTxn.Timestamp.Forward(t.PusheeTxn.Timestamp.Add(0, 1))
		newTxn.Restart(ba.GetUserPriority(), t.PusheeTxn.Priority-1, newTxn.Timestamp)
		t.Txn = newTxn
	case *roachpb.TransactionRetryError:
		// Increase timestamp if applicable.
		newTxn.Timestamp.Forward(t.Txn.Timestamp)
		newTxn.Restart(ba.GetUserPriority(), t.Txn.Priority, newTxn.Timestamp)
		t.Txn = *newTxn
	case roachpb.TransactionRestartError:
		// Assertion: The above cases should exhaust all ErrorDetails which
		// carry a Transaction.
		if pErr.Detail != nil {
			panic(fmt.Sprintf("unhandled TransactionRestartError %T", err))
		}
	}
	return func() *roachpb.Error {
		if len(newTxn.ID) <= 0 {
			return pErr
		}
		id := string(newTxn.ID)
		tc.Lock()
		defer tc.Unlock()
		txnMeta := tc.txns[id]
		// For successful transactional requests, keep the written intents and
		// the updated transaction record to be sent along with the reply.
		// The transaction metadata is created with the first writing operation
		// TODO(tschottdorf): already computed the intents prior to sending,
		// consider re-using those.
		if intents := ba.GetIntents(); len(intents) > 0 && err == nil {
			if txnMeta == nil {
				newTxn.Writing = true
				txnMeta = &txnMetadata{
					txn:              *newTxn,
					keys:             cache.NewIntervalCache(cache.Config{Policy: cache.CacheNone}),
					firstUpdateNanos: tc.clock.PhysicalNow(),
					lastUpdateNanos:  tc.clock.PhysicalNow(),
					timeoutDuration:  tc.clientTimeout,
					txnEnd:           make(chan struct{}),
				}
				tc.txns[id] = txnMeta
				// If the transaction is already over, there's no point in
				// launching a one-off coordinator which will shut down right
				// away.
				if _, isEnding := ba.GetArg(roachpb.EndTransaction); !isEnding {
					trace.Event("coordinator spawns")
					if !tc.stopper.RunAsyncTask(func() {
						tc.heartbeatLoop(id)
					}) {
						// The system is already draining and we can't start the
//......... (part of the code omitted here) .........
Example 8: updateState
//......... (part of the code omitted here) .........
		newTxn.Restart(ba.UserPriority, pErr.GetTxn().Priority, newTxn.Timestamp)
	default:
		if pErr.GetTxn() != nil {
			if pErr.CanRetry() {
				panic("Retryable internal error must not happen at this level")
			} else {
				// Do not clean up the transaction here since the client might still
				// want to continue the transaction. For example, a client might
				// continue its transaction after receiving ConditionFailedError, which
				// can come from a unique index violation.
			}
		}
	}
	if pErr != nil && pErr.GetTxn() != nil {
		// Avoid changing existing errors because sometimes they escape into
		// goroutines and then there are races. Fairly sure there isn't one
		// here, but better safe than sorry.
		pErrShallow := *pErr
		pErrShallow.SetTxn(newTxn)
		pErr = &pErrShallow
	}
	if newTxn.ID == nil {
		return pErr
	}
	txnID := *newTxn.ID
	tc.Lock()
	defer tc.Unlock()
	txnMeta := tc.txns[txnID]
	// For successful transactional requests, keep the written intents and
	// the updated transaction record to be sent along with the reply.
	// The transaction metadata is created with the first writing operation.
	// A tricky edge case is that of a transaction which "fails" on the
	// first writing request, but actually manages to write some intents
	// (for example, due to being multi-range). In this case, there will
	// be an error, but the transaction will be marked as Writing and the
	// coordinator must track the state, for the client's retry will be
	// performed with a Writing transaction which the coordinator rejects
	// unless it is tracking it (on top of it making sense to track it;
	// after all, it **has** laid down intents and only the coordinator
	// can augment a potential EndTransaction call). See #3303.
	intents := ba.GetIntentSpans()
	if len(intents) > 0 && (pErr == nil || newTxn.Writing) {
		if txnMeta == nil {
			if !newTxn.Writing {
				panic("txn with intents marked as non-writing")
			}
			// If the transaction is already over, there's no point in
			// launching a one-off coordinator which will shut down right
			// away. If we ended up here with an error, we'll always start
			// the coordinator - the transaction has laid down intents, so
			// we expect it to be committed/aborted at some point in the
			// future.
			if _, isEnding := ba.GetArg(roachpb.EndTransaction); pErr != nil || !isEnding {
				sp.LogEvent("coordinator spawns")
				txnMeta = &txnMetadata{
					txn:              *newTxn,
					keys:             cache.NewIntervalCache(cache.Config{Policy: cache.CacheNone}),
					firstUpdateNanos: tc.clock.PhysicalNow(),
					lastUpdateNanos:  tc.clock.PhysicalNow(),
					timeoutDuration:  tc.clientTimeout,
					txnEnd:           make(chan struct{}),
				}
				tc.txns[txnID] = txnMeta
				if !tc.stopper.RunAsyncTask(func() {
					tc.heartbeatLoop(txnID)
				}) {
					// The system is already draining and we can't start the
					// heartbeat. We refuse new transactions for now because
					// they're likely not going to have all intents committed.
					// In principle, we can relax this as needed though.
					tc.unregisterTxnLocked(txnID)
					return roachpb.NewError(&roachpb.NodeUnavailableError{})
				}
			}
		}
	}
	// Update our record of this transaction, even on error.
	if txnMeta != nil {
		txnMeta.txn = *newTxn
		if !txnMeta.txn.Writing {
			panic("tracking a non-writing txn")
		}
		txnMeta.setLastUpdate(tc.clock.PhysicalNow())
		// Adding the intents even on error reduces the likelihood of dangling
		// intents blocking concurrent writers for extended periods of time.
		// See #3346.
		for _, intent := range intents {
			txnMeta.addKeyRange(intent.Key, intent.EndKey)
		}
	}
	if pErr == nil {
		// For successful transactional requests, always send the updated txn
		// record back.
		br.Txn = newTxn
	}
	return pErr
}
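One detail near the top of this example deserves emphasis: pErr is never mutated in place, because errors can escape into goroutines the sender didn't wait for. A hedged distillation of that copy-on-write idiom (SetTxn and GetTxn are used exactly as in the example; the helper itself is illustrative):

// withUpdatedTxn returns an error carrying newTxn without touching the
// original, which may already be shared with other goroutines.
func withUpdatedTxn(pErr *roachpb.Error, newTxn *roachpb.Transaction) *roachpb.Error {
	if pErr == nil || pErr.GetTxn() == nil {
		return pErr
	}
	pErrShallow := *pErr       // shallow copy; the original stays untouched
	pErrShallow.SetTxn(newTxn) // mutate only the copy
	return &pErrShallow
}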
Example 9: Send
// Send implements the batch.Sender interface. If the request is part of a
// transaction, the TxnCoordSender adds the transaction to a map of active
// transactions and begins heartbeating it. Every subsequent request for the
// same transaction updates the lastUpdate timestamp to prevent live
// transactions from being considered abandoned and garbage collected.
// Read/write mutating requests have their key or key range added to the
// transaction's interval tree of key ranges for eventual cleanup via resolved
// write intents; they're tagged to an outgoing EndTransaction request, with
// the receiving replica in charge of resolving them.
func (tc *TxnCoordSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	// Start new or pick up active trace and embed its trace metadata into
	// header for use by RPC recipients. From here on, there's always an active
	// Trace, though its overhead is small unless it's sampled.
	sp, cleanupSp := tracing.SpanFromContext(opTxnCoordSender, tc.tracer, ctx)
	defer cleanupSp()
	// TODO(tschottdorf): To get rid of the spurious alloc below we need to
	// implement the carrier interface on ba.Header or make Span non-nullable,
	// both of which force all of ba on the Heap. It's already there, so may
	// not be a big deal, but ba should live on the stack. Also not easy to use
	// a buffer pool here since anything that goes into the RPC layer could be
	// used by goroutines we didn't wait for.
	if ba.Header.Trace == nil {
		ba.Header.Trace = &tracing.Span{}
	}
	if err := tc.tracer.Inject(sp, basictracer.Delegator, ba.Trace); err != nil {
		return nil, roachpb.NewError(err)
	}
	if err := tc.maybeBeginTxn(&ba); err != nil {
		return nil, roachpb.NewError(err)
	}
	var startNS int64
	ba.SetNewRequest()
	// This is the earliest point at which the request has an ID (if
	// applicable). Begin a Trace which follows this request.
	ctx = opentracing.ContextWithSpan(ctx, sp)
	if ba.Txn != nil {
		// If this request is part of a transaction...
		txnID := *ba.Txn.ID
		// Verify that if this Transaction is not read-only, we have it on
		// file. If not, refuse writes - the client must have issued a write on
		// another coordinator previously.
		if ba.Txn.Writing && ba.IsTransactionWrite() {
			tc.Lock()
			_, ok := tc.txns[txnID]
			tc.Unlock()
			if !ok {
				return nil, roachpb.NewErrorf("transaction must not write on multiple coordinators")
			}
		}
		// Set the timestamp to the original timestamp for read-only
		// commands and to the transaction timestamp for read/write
		// commands.
		if ba.IsReadOnly() {
			ba.Timestamp = ba.Txn.OrigTimestamp
		} else {
			ba.Timestamp = ba.Txn.Timestamp
		}
		if rArgs, ok := ba.GetArg(roachpb.EndTransaction); ok {
			et := rArgs.(*roachpb.EndTransactionRequest)
			if len(et.Key) != 0 {
				return nil, roachpb.NewErrorf("EndTransaction must not have a Key set")
			}
			et.Key = ba.Txn.Key
			// Remember when EndTransaction started in case we want to
			// be linearizable.
			startNS = tc.clock.PhysicalNow()
			if len(et.IntentSpans) > 0 {
				// TODO(tschottdorf): it may be useful to allow this later.
				// That would be part of a possible plan to allow txns which
				// write on multiple coordinators.
				return nil, roachpb.NewErrorf("client must not pass intents to EndTransaction")
			}
			tc.Lock()
			txnMeta, metaOK := tc.txns[txnID]
			if metaOK {
				et.IntentSpans = txnMeta.intentSpans()
			}
			tc.Unlock()
			if intentSpans := ba.GetIntentSpans(); len(intentSpans) > 0 {
				// Writes in Batch, so EndTransaction is fine. Should add
				// outstanding intents to EndTransaction, though.
				// TODO(tschottdorf): possible issues when the batch fails,
				// but the intents have been added anyways.
				// TODO(tschottdorf): some of these intents may be covered
				// by others, for example {[a,b), a}). This can lead to
				// some extra requests when those are non-local to the txn
				// record. But it doesn't seem worth optimizing now.
				et.IntentSpans = append(et.IntentSpans, intentSpans...)
			} else if !metaOK {
				// If we don't have the transaction, then this must be a retry
				// by the client. We can no longer reconstruct a correct
				// request so we must fail.
				//
				// TODO(bdarnell): if we had a GetTransactionStatus API then
//......... (part of the code omitted here) .........