This article collects typical usage examples of the Golang method github.com/cockroachdb/cockroach/roachpb.BatchRequest.Add. If you are wondering what BatchRequest.Add does and how to use it, the curated examples below may help. You can also look further into the usage of the containing type, github.com/cockroachdb/cockroach/roachpb.BatchRequest.
The following shows 15 code examples of the BatchRequest.Add method, sorted by popularity by default.
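Before the examples, here is a minimal sketch of the pattern they all share: start from a zero-value BatchRequest, Add one or more requests, set any batch-wide header fields, and hand the batch to a Sender's Send method. The sketch assumes the same (older) CockroachDB import path and API vintage as the examples below, where request headers are embedded as a Span; buildBatch and its key parameters are hypothetical names used only for illustration.

package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/roachpb"
)

// buildBatch sketches the common BatchRequest.Add pattern: a Get and a Scan
// are added to one batch and a batch-wide header field is set. The batch
// would then be passed to a Sender's Send method.
func buildBatch(start, end roachpb.Key) roachpb.BatchRequest {
	var ba roachpb.BatchRequest
	ba.ReadConsistency = roachpb.INCONSISTENT // applies to every request in the batch

	get := &roachpb.GetRequest{}
	get.Key = start // Key is promoted from the embedded Span
	ba.Add(get)

	ba.Add(&roachpb.ScanRequest{Span: roachpb.Span{Key: start, EndKey: end}})
	return ba
}

func main() {
	ba := buildBatch(roachpb.Key("a"), roachpb.Key("z"))
	fmt.Printf("batch with %d requests\n", len(ba.Requests))
}

Note that Add also accepts multiple requests at once; Examples 5 and 7 below call ba.Add(reqs...) with a variadic slice.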
Example 1: TestTxnCoordSenderSingleRoundtripTxn
// TestTxnCoordSenderSingleRoundtripTxn checks that a batch which completely
// holds the writing portion of a Txn (including EndTransaction) does not
// launch a heartbeat goroutine at all.
func TestTxnCoordSenderSingleRoundtripTxn(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(20)
	ts := NewTxnCoordSender(senderFn(func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		return ba.CreateReply(), nil
	}), clock, false, nil, stopper)
	// Stop the stopper manually, prior to trying the transaction. This has the
	// effect of returning a NodeUnavailableError for any attempts at launching
	// a heartbeat goroutine.
	stopper.Stop()
	var ba roachpb.BatchRequest
	put := &roachpb.PutRequest{}
	put.Key = roachpb.Key("test")
	ba.Add(put)
	ba.Add(&roachpb.EndTransactionRequest{})
	ba.Txn = &roachpb.Transaction{Name: "test"}
	_, pErr := ts.Send(context.Background(), ba)
	if pErr != nil {
		t.Fatal(pErr)
	}
}
Example 2: rangeLookup
// rangeLookup dispatches a RangeLookup request for the given
// metadata key to the replicas of the given range. Note that we allow
// inconsistent reads when doing range lookups for efficiency. Getting
// stale data is not a correctness problem but instead may
// infrequently result in additional latency as additional range
// lookups may be required. Note also that rangeLookup bypasses the
// DistSender's Send() method, so there is no error inspection and
// retry logic here; this is not an issue since the lookup performs a
// single inconsistent read only.
func (ds *DistSender) rangeLookup(key roachpb.Key, options lookupOptions,
	desc *roachpb.RangeDescriptor) ([]roachpb.RangeDescriptor, error) {
	ba := roachpb.BatchRequest{}
	ba.ReadConsistency = roachpb.INCONSISTENT
	ba.Add(&roachpb.RangeLookupRequest{
		RequestHeader: roachpb.RequestHeader{
			Key:             key,
			ReadConsistency: roachpb.INCONSISTENT,
		},
		MaxRanges:       ds.rangeLookupMaxRanges,
		ConsiderIntents: options.considerIntents,
		Reverse:         options.useReverseScan,
	})
	replicas := newReplicaSlice(ds.gossip, desc)
	// TODO(tschottdorf) consider a Trace here, potentially that of the request
	// that had the cache miss and waits for the result.
	br, err := ds.sendRPC(nil /* Trace */, desc.RangeID, replicas, rpc.OrderRandom, ba)
	if err != nil {
		return nil, err
	}
	if err := br.GoError(); err != nil {
		return nil, err
	}
	return br.Responses[0].GetInner().(*roachpb.RangeLookupResponse).Ranges, nil
}
Example 3: TestBatchError
// TestBatchError verifies that Range returns an error if a request has an invalid range.
func TestBatchError(t *testing.T) {
	testCases := []struct {
		req    [2]string
		errMsg string
	}{
		{
			req:    [2]string{"\xff\xff\xff\xff", "a"},
			errMsg: "must be less than KeyMax",
		},
		{
			req:    [2]string{"a", "\xff\xff\xff\xff"},
			errMsg: "must be less than or equal to KeyMax",
		},
	}
	for i, c := range testCases {
		var ba roachpb.BatchRequest
		ba.Add(&roachpb.ScanRequest{Span: roachpb.Span{Key: roachpb.Key(c.req[0]), EndKey: roachpb.Key(c.req[1])}})
		if _, err := Range(ba); !testutils.IsError(err, c.errMsg) {
			t.Errorf("%d: unexpected error %v", i, err)
		}
	}
	// Test a case where a non-range request has an end key.
	var ba roachpb.BatchRequest
	ba.Add(&roachpb.GetRequest{Span: roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("b")}})
	if _, err := Range(ba); !testutils.IsError(err, "end key specified for non-range operation") {
		t.Errorf("unexpected error %v", err)
	}
}
Example 4: RangeLookup
// RangeLookup dispatches a RangeLookup request for the given
// metadata key to the replicas of the given range. Note that we allow
// inconsistent reads when doing range lookups for efficiency. Getting
// stale data is not a correctness problem but instead may
// infrequently result in additional latency as additional range
// lookups may be required. Note also that rangeLookup bypasses the
// DistSender's Send() method, so there is no error inspection and
// retry logic here; this is not an issue since the lookup performs a
// single inconsistent read only.
func (ds *DistSender) RangeLookup(key roachpb.RKey, desc *roachpb.RangeDescriptor, considerIntents, useReverseScan bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
	ba := roachpb.BatchRequest{}
	ba.ReadConsistency = roachpb.INCONSISTENT
	ba.Add(&roachpb.RangeLookupRequest{
		Span: roachpb.Span{
			// We can interpret the RKey as a Key here since it's a metadata
			// lookup; those are never local.
			Key: key.AsRawKey(),
		},
		MaxRanges:       ds.rangeLookupMaxRanges,
		ConsiderIntents: considerIntents,
		Reverse:         useReverseScan,
	})
	replicas := newReplicaSlice(ds.gossip, desc)
	trace := ds.Tracer.StartSpan("range lookup")
	defer trace.Finish()
	// TODO(tschottdorf): Ideally we would use the trace of the request which
	// caused this lookup instead of a new one.
	br, err := ds.sendRPC(trace, desc.RangeID, replicas, orderRandom, ba)
	if err != nil {
		return nil, err
	}
	if br.Error != nil {
		return nil, br.Error
	}
	return br.Responses[0].GetInner().(*roachpb.RangeLookupResponse).Ranges, nil
}
Example 5: send
// send runs the specified calls synchronously in a single batch and returns
// any errors. Returns a nil response for empty input (no requests).
func (db *DB) send(maxScanResults int64, readConsistency roachpb.ReadConsistencyType,
	reqs ...roachpb.Request) (*roachpb.BatchResponse, *roachpb.Error) {
	if len(reqs) == 0 {
		return nil, nil
	}
	if readConsistency == roachpb.INCONSISTENT {
		for _, req := range reqs {
			if req.Method() != roachpb.Get && req.Method() != roachpb.Scan &&
				req.Method() != roachpb.ReverseScan {
				return nil, roachpb.NewErrorf("method %s not allowed with INCONSISTENT batch", req.Method())
			}
		}
	}
	ba := roachpb.BatchRequest{}
	ba.Add(reqs...)
	ba.MaxScanResults = maxScanResults
	if db.userPriority != 1 {
		ba.UserPriority = db.userPriority
	}
	ba.ReadConsistency = readConsistency
	tracing.AnnotateTrace()
	br, pErr := db.sender.Send(context.TODO(), ba)
	if pErr != nil {
		if log.V(1) {
			log.Infof("failed batch: %s", pErr)
		}
		return nil, pErr
	}
	return br, nil
}
Example 6: RangeLookup
// RangeLookup dispatches a RangeLookup request for the given
// metadata key to the replicas of the given range. Note that we allow
// inconsistent reads when doing range lookups for efficiency. Getting
// stale data is not a correctness problem but instead may
// infrequently result in additional latency as additional range
// lookups may be required. Note also that rangeLookup bypasses the
// DistSender's Send() method, so there is no error inspection and
// retry logic here; this is not an issue since the lookup performs a
// single inconsistent read only.
func (ds *DistSender) RangeLookup(key roachpb.RKey, desc *roachpb.RangeDescriptor, considerIntents, useReverseScan bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
	ba := roachpb.BatchRequest{}
	ba.ReadConsistency = roachpb.INCONSISTENT
	ba.Add(&roachpb.RangeLookupRequest{
		Span: roachpb.Span{
			// We can interpret the RKey as a Key here since it's a metadata
			// lookup; those are never local.
			Key: key.AsRawKey(),
		},
		MaxRanges:       ds.rangeLookupMaxRanges,
		ConsiderIntents: considerIntents,
		Reverse:         useReverseScan,
	})
	replicas := newReplicaSlice(ds.gossip, desc)
	// TODO(tschottdorf) consider a Trace here, potentially that of the request
	// that had the cache miss and waits for the result.
	br, err := ds.sendRPC(nil /* Trace */, desc.RangeID, replicas, rpc.OrderRandom, ba)
	if err != nil {
		return nil, err
	}
	if br.Error != nil {
		return nil, br.Error
	}
	return br.Responses[0].GetInner().(*roachpb.RangeLookupResponse).Ranges, nil
}
Example 7: send
// send runs the specified calls synchronously in a single batch and returns
// any errors. Returns a nil response for empty input (no requests).
func (db *DB) send(maxScanResults int64, reqs ...roachpb.Request) (
	*roachpb.BatchResponse, *roachpb.Error) {
	if len(reqs) == 0 {
		return nil, nil
	}
	ba := roachpb.BatchRequest{}
	ba.Add(reqs...)
	ba.MaxScanResults = maxScanResults
	if db.userPriority != 1 {
		ba.UserPriority = db.userPriority
	}
	tracing.AnnotateTrace()
	br, pErr := db.sender.Send(context.TODO(), ba)
	if pErr != nil {
		if log.V(1) {
			log.Infof("failed batch: %s", pErr)
		}
		return nil, pErr
	}
	return br, nil
}
Example 8: RangeLookup
// RangeLookup implements the RangeDescriptorDB interface.
// RangeLookup dispatches a RangeLookup request for the given metadata
// key to the replicas of the given range. Note that we allow
// inconsistent reads when doing range lookups for efficiency. Getting
// stale data is not a correctness problem but instead may
// infrequently result in additional latency as additional range
// lookups may be required. Note also that rangeLookup bypasses the
// DistSender's Send() method, so there is no error inspection and
// retry logic here; this is not an issue since the lookup performs a
// single inconsistent read only.
func (ds *DistSender) RangeLookup(
	key roachpb.RKey, desc *roachpb.RangeDescriptor, considerIntents, useReverseScan bool,
) ([]roachpb.RangeDescriptor, []roachpb.RangeDescriptor, *roachpb.Error) {
	ba := roachpb.BatchRequest{}
	ba.ReadConsistency = roachpb.INCONSISTENT
	ba.Add(&roachpb.RangeLookupRequest{
		Span: roachpb.Span{
			// We can interpret the RKey as a Key here since it's a metadata
			// lookup; those are never local.
			Key: key.AsRawKey(),
		},
		MaxRanges:       ds.rangeLookupMaxRanges,
		ConsiderIntents: considerIntents,
		Reverse:         useReverseScan,
	})
	replicas := newReplicaSlice(ds.gossip, desc)
	replicas.Shuffle()
	// TODO(tschottdorf): Ideally we would use the trace of the request which
	// caused this lookup.
	_ = context.TODO()
	br, err := ds.sendRPC(ds.Ctx, desc.RangeID, replicas, ba)
	if err != nil {
		return nil, nil, roachpb.NewError(err)
	}
	if br.Error != nil {
		return nil, nil, br.Error
	}
	resp := br.Responses[0].GetInner().(*roachpb.RangeLookupResponse)
	return resp.Ranges, resp.PrefetchedRanges, nil
}
Example 9: testPut
func testPut() roachpb.BatchRequest {
	var ba roachpb.BatchRequest
	ba.Timestamp = testTS
	put := &roachpb.PutRequest{}
	put.Key = testKey
	ba.Add(put)
	return ba
}
Example 10: heartbeat
func (tc *TxnCoordSender) heartbeat(id string, trace *tracer.Trace, ctx context.Context) bool {
	tc.Lock()
	proceed := true
	txnMeta := tc.txns[id]
	// Before we send a heartbeat, determine whether this transaction
	// should be considered abandoned. If so, exit heartbeat.
	if txnMeta.hasClientAbandonedCoord(tc.clock.PhysicalNow()) {
		// TODO(tschottdorf): should we be more proactive here?
		// The client might be continuing the transaction
		// through another coordinator, but in the most likely
		// case it's just gone and the open transaction record
		// could block concurrent operations.
		if log.V(1) {
			log.Infof("transaction %s abandoned; stopping heartbeat",
				txnMeta.txn)
		}
		proceed = false
	}
	// txnMeta.txn is possibly replaced concurrently,
	// so grab a copy before unlocking.
	txn := txnMeta.txn
	tc.Unlock()
	if !proceed {
		return false
	}
	hb := &roachpb.HeartbeatTxnRequest{}
	hb.Key = txn.Key
	ba := roachpb.BatchRequest{}
	ba.Timestamp = tc.clock.Now()
	ba.CmdID = ba.GetOrCreateCmdID(ba.Timestamp.WallTime)
	ba.Txn = txn.Clone()
	ba.Add(hb)
	epochEnds := trace.Epoch("heartbeat")
	_, err := tc.wrapped.Send(ctx, ba)
	epochEnds()
	// If the transaction is not in pending state, then we can stop
	// the heartbeat. It's either aborted or committed, and we resolve
	// write intents accordingly.
	if err != nil {
		log.Warningf("heartbeat to %s failed: %s", txn, err)
	}
	// TODO(bdarnell): once we have gotten a heartbeat response with
	// Status != PENDING, future heartbeats are useless. However, we
	// need to continue the heartbeatLoop until the client either
	// commits or abandons the transaction. We could save a little
	// pointless work by restructuring this loop to stop sending
	// heartbeats between the time that the transaction is aborted and
	// the client finds out. Furthermore, we could use this information
	// to send TransactionAbortedErrors to the client so it can restart
	// immediately instead of running until its EndTransaction.
	return true
}
Example 11: TestBatchPrevNext
// TestBatchPrevNext tests batch.{Prev,Next}.
func TestBatchPrevNext(t *testing.T) {
	defer leaktest.AfterTest(t)()
	loc := func(s string) string {
		return string(keys.RangeDescriptorKey(roachpb.RKey(s)))
	}
	span := func(strs ...string) []roachpb.Span {
		var r []roachpb.Span
		for i, str := range strs {
			if i%2 == 0 {
				r = append(r, roachpb.Span{Key: roachpb.Key(str)})
			} else {
				r[len(r)-1].EndKey = roachpb.Key(str)
			}
		}
		return r
	}
	max, min := string(roachpb.RKeyMax), string(roachpb.RKeyMin)
	abc := span("a", "", "b", "", "c", "")
	testCases := []struct {
		spans             []roachpb.Span
		key, expFW, expBW string
	}{
		{spans: span("a", "c", "b", ""), key: "b", expFW: "b", expBW: "b"},
		{spans: span("a", "c", "b", ""), key: "a", expFW: "a", expBW: "a"},
		{spans: span("a", "c", "d", ""), key: "c", expFW: "d", expBW: "c"},
		{spans: span("a", "c\x00", "d", ""), key: "c", expFW: "c", expBW: "c"},
		{spans: abc, key: "b", expFW: "b", expBW: "b"},
		{spans: abc, key: "b\x00", expFW: "c", expBW: "b\x00"},
		{spans: abc, key: "bb", expFW: "c", expBW: "b"},
		{spans: span(), key: "whatevs", expFW: max, expBW: min},
		{spans: span(loc("a"), loc("c")), key: "c", expFW: "c", expBW: "c"},
		{spans: span(loc("a"), loc("c")), key: "c\x00", expFW: max, expBW: "c\x00"},
	}
	for i, test := range testCases {
		var ba roachpb.BatchRequest
		for _, span := range test.spans {
			args := &roachpb.ScanRequest{}
			args.Key, args.EndKey = span.Key, span.EndKey
			ba.Add(args)
		}
		if next, err := next(ba, roachpb.RKey(test.key)); err != nil {
			t.Errorf("%d: %v", i, err)
		} else if !bytes.Equal(next, roachpb.Key(test.expFW)) {
			t.Errorf("%d: next: expected %q, got %q", i, test.expFW, next)
		}
		if prev, err := prev(ba, roachpb.RKey(test.key)); err != nil {
			t.Errorf("%d: %v", i, err)
		} else if !bytes.Equal(prev, roachpb.Key(test.expBW)) {
			t.Errorf("%d: prev: expected %q, got %q", i, test.expBW, prev)
		}
	}
}
Example 12: process
// process iterates through all keys in a replica's range, calling the garbage
// collector for each key and associated set of values. GC'd keys are batched
// into GC calls. Extant intents are resolved if intents are older than
// intentAgeThreshold. The transaction and abort cache records are also
// scanned and old entries evicted. During normal operation, both of these
// records are cleaned up when their respective transaction finishes, so the
// amount of work done here is expected to be small.
//
// Some care needs to be taken to avoid cyclic recreation of entries during GC:
// * a Push initiated due to an intent may recreate a transaction entry
// * resolving an intent may write a new abort cache entry
// * obtaining the transaction for an abort cache entry requires a Push
//
// The following order is taken below:
// 1) collect all intents with sufficiently old txn record
// 2) collect these intents' transactions
// 3) scan the transaction table, collecting abandoned or completed txns
// 4) push all of these transactions (possibly recreating entries)
// 5) resolve all intents (unless the txn is still PENDING), which will recreate
// abort cache entries (but with the txn timestamp; i.e. likely gc'able)
// 6) scan the abort cache table for old entries
// 7) push these transactions (again, recreating txn entries).
// 8) send a GCRequest.
func (gcq *gcQueue) process(
	ctx context.Context,
	now hlc.Timestamp,
	repl *Replica,
	sysCfg config.SystemConfig,
) error {
	snap := repl.store.Engine().NewSnapshot()
	desc := repl.Desc()
	defer snap.Close()
	// Lookup the GC policy for the zone containing this key range.
	zone, err := sysCfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		return errors.Errorf("could not find zone config for range %s: %s", repl, err)
	}
	gcKeys, info, err := RunGC(ctx, desc, snap, now, zone.GC,
		func(now hlc.Timestamp, txn *roachpb.Transaction, typ roachpb.PushTxnType) {
			pushTxn(gcq.store.DB(), now, txn, typ)
		},
		func(intents []roachpb.Intent, poison bool, wait bool) error {
			return repl.store.intentResolver.resolveIntents(ctx, intents, poison, wait)
		})
	if err != nil {
		return err
	}
	gcq.eventLog.VInfof(true, "completed with stats %+v", info)
	var ba roachpb.BatchRequest
	var gcArgs roachpb.GCRequest
	// TODO(tschottdorf): This is one of these instances in which we want
	// to be more careful that the request ends up on the correct Replica,
	// and we might have to worry about mixing range-local and global keys
	// in a batch which might end up spanning Ranges by the time it executes.
	gcArgs.Key = desc.StartKey.AsRawKey()
	gcArgs.EndKey = desc.EndKey.AsRawKey()
	gcArgs.Keys = gcKeys
	gcArgs.Threshold = info.Threshold
	// Technically not needed since we're talking directly to the Range.
	ba.RangeID = desc.RangeID
	ba.Timestamp = now
	ba.Add(&gcArgs)
	if _, pErr := repl.Send(ctx, ba); pErr != nil {
		return pErr.GoError()
	}
	return nil
}
Example 13: SendWrappedWith
// SendWrappedWith is a convenience function which wraps the request in a batch
// and sends it via the provided Sender at the given timestamp. It returns the
// unwrapped response or an error. It's valid to pass a `nil` context;
// context.Background() is used in that case.
func SendWrappedWith(sender Sender, ctx context.Context, h roachpb.Header, args roachpb.Request) (roachpb.Response, error) {
	if ctx == nil {
		ctx = context.Background()
	}
	ba := roachpb.BatchRequest{}
	ba.Header = h
	ba.Add(args)
	br, pErr := sender.Send(ctx, ba)
	if err := pErr.GoError(); err != nil {
		return nil, err
	}
	unwrappedReply := br.Responses[0].GetInner()
	unwrappedReply.Header().Txn = br.Txn
	return unwrappedReply, nil
}
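As a usage sketch (not taken from the CockroachDB source), here is how a caller in the same package might invoke SendWrappedWith. The getOne helper, its key parameter, and the empty roachpb.Header are hypothetical; Sender, GetRequest, and GetResponse are the types already used elsewhere on this page.

// getOne is a hypothetical helper illustrating a SendWrappedWith call site:
// it wraps a single GetRequest, sends it with a default Header and a nil
// context (context.Background() is substituted, per the comment above), and
// type-asserts the unwrapped reply back to its concrete response type.
func getOne(sender Sender, key roachpb.Key) (*roachpb.GetResponse, error) {
	get := &roachpb.GetRequest{}
	get.Key = key
	reply, err := SendWrappedWith(sender, nil, roachpb.Header{}, get)
	if err != nil {
		return nil, err
	}
	return reply.(*roachpb.GetResponse), nil
}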
Example 14: clientHasAbandoned
func (tc *TxnCoordSender) clientHasAbandoned(txnID uuid.UUID) {
	tc.Lock()
	txnMeta := tc.txns[txnID]
	var intentSpans []roachpb.Span
	// TODO(tschottdorf): should we be more proactive here?
	// The client might be continuing the transaction
	// through another coordinator, but in the most likely
	// case it's just gone and the open transaction record
	// could block concurrent operations.
	if log.V(1) {
		log.Infof("transaction %s abandoned; stopping heartbeat", txnMeta.txn)
	}
	// Grab the intents here to avoid potential race.
	intentSpans = collectIntentSpans(txnMeta.keys)
	txnMeta.keys.Clear()
	// txnMeta.txn is possibly replaced concurrently,
	// so grab a copy before unlocking.
	txn := txnMeta.txn.Clone()
	tc.Unlock()
	ba := roachpb.BatchRequest{}
	ba.Txn = &txn
	// Actively abort the transaction and its intents since we assume it's abandoned.
	et := &roachpb.EndTransactionRequest{
		Span: roachpb.Span{
			Key: txn.Key,
		},
		Commit:      false,
		IntentSpans: intentSpans,
	}
	ba.Add(et)
	tc.stopper.RunAsyncTask(func() {
		// Use the wrapped sender since the normal Sender
		// does not allow clients to specify intents.
		// TODO(tschottdorf): not using the existing context here since that
		// leads to use-after-finish of the contained trace. Should fork off
		// before the goroutine.
		if _, pErr := tc.wrapped.Send(context.Background(), ba); pErr != nil {
			if log.V(1) {
				log.Warningf("abort due to inactivity failed for %s: %s", txn, pErr)
			}
		}
	})
}
Example 15: rangeLookup
// rangeLookup implements the rangeDescriptorDB interface. It looks up
// the descriptors for the given (meta) key.
func (ls *LocalSender) rangeLookup(key roachpb.Key, options lookupOptions, _ *roachpb.RangeDescriptor) ([]roachpb.RangeDescriptor, error) {
	ba := roachpb.BatchRequest{}
	ba.ReadConsistency = roachpb.INCONSISTENT
	ba.Add(&roachpb.RangeLookupRequest{
		RequestHeader: roachpb.RequestHeader{
			Key: key,
		},
		MaxRanges:       1,
		ConsiderIntents: options.considerIntents,
		Reverse:         options.useReverseScan,
	})
	br, pErr := ls.Send(context.Background(), ba)
	if pErr != nil {
		return nil, pErr.GoError()
	}
	return br.Responses[0].GetInner().(*roachpb.RangeLookupResponse).Ranges, nil
}