This page collects typical usage examples of the Golang method BatchRequest.Split from github.com/cockroachdb/cockroach/roachpb. If you are unsure what BatchRequest.Split does, how to call it, or where it is used, the curated examples below should help. You can also read more about the containing type, github.com/cockroachdb/cockroach/roachpb.BatchRequest.
Five code examples of BatchRequest.Split are shown below, ordered by popularity.
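Before the individual examples, here is a minimal sketch of the pattern they all share: Split breaks a BatchRequest into chunks that are legal to send on their own, each chunk is sent through the wrapped sender, and the per-chunk responses are stitched back into one BatchResponse. This sketch is not code from the CockroachDB repository: sendChunked and its send parameter are hypothetical stand-ins, the imports (golang.org/x/net/context, github.com/cockroachdb/cockroach/roachpb) are assumed, and the no-argument form of Split matches examples 1-2 (examples 3-5 pass a bool that controls whether EndTransaction is split into its own chunk).

// sendChunked is a hypothetical helper condensing the Split/recombine pattern
// used by the examples below; "send" stands in for the wrapped sender.
func sendChunked(
	ctx context.Context,
	ba roachpb.BatchRequest,
	send func(context.Context, roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error),
) (*roachpb.BatchResponse, *roachpb.Error) {
	if len(ba.Requests) == 0 {
		panic("empty batch")
	}
	var rplChunks []*roachpb.BatchResponse
	for _, part := range ba.Split() { // each part is a sub-batch that is legal to send on its own
		ba.Requests = part
		rpl, pErr := send(ctx, ba)
		if pErr != nil {
			return nil, pErr
		}
		if ba.Txn != nil {
			// Carry the transaction forward so the next chunk (and the final
			// response header) sees the updates from the previous reply.
			ba.Txn.Update(rpl.Header().Txn)
		}
		rplChunks = append(rplChunks, rpl)
	}
	// Concatenate the per-chunk responses and adopt the last chunk's header.
	reply := rplChunks[0]
	for _, rpl := range rplChunks[1:] {
		reply.Responses = append(reply.Responses, rpl.Responses...)
	}
	*reply.Header() = rplChunks[len(rplChunks)-1].BatchResponse_Header
	return reply, nil
}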
Example 1: Send
// Send implements Sender.
// TODO(tschottdorf): We actually don't want to chop EndTransaction off for
// single-range requests (but that happens now since EndTransaction has the
// isAlone flag). Whether it is one or not is unknown right now (you can only
// find out after you've sent to the Range/looked up a descriptor that suggests
// that you're multi-range). In those cases, the wrapped sender should return an
// error so that we split and retry once for the chunk which contains
// EndTransaction (i.e. the last one).
func (cs *chunkingSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	if len(ba.Requests) < 1 {
		panic("empty batch")
	}
	parts := ba.Split()
	var rplChunks []*roachpb.BatchResponse
	for _, part := range parts {
		ba.Requests = part
		// Increase the sequence counter to account for the fact that while
		// chunking, we're likely sending multiple requests to the same Replica.
		ba.SetNewRequest()
		rpl, err := cs.f(ctx, ba)
		if err != nil {
			return nil, err
		}
		// Propagate transaction from last reply to next request. The final
		// update is taken and put into the response's main header.
		ba.Txn.Update(rpl.Header().Txn)
		rplChunks = append(rplChunks, rpl)
	}
	reply := rplChunks[0]
	for _, rpl := range rplChunks[1:] {
		reply.Responses = append(reply.Responses, rpl.Responses...)
	}
	lastHeader := rplChunks[len(rplChunks)-1].BatchResponse_Header
	reply.Error = lastHeader.Error
	reply.Timestamp = lastHeader.Timestamp
	reply.Txn = ba.Txn
	return reply, nil
}
Example 2: Send
// Send implements Sender.
// TODO(tschottdorf): We actually don't want to chop EndTransaction off for
// single-range requests (but that happens now since EndTransaction has the
// isAlone flag). Whether it is one or not is unknown right now (you can only
// find out after you've sent to the Range/looked up a descriptor that suggests
// that you're multi-range). In those cases, the wrapped sender should return an
// error so that we split and retry once for the chunk which contains
// EndTransaction (i.e. the last one).
func (cs *chunkingSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	if len(ba.Requests) < 1 {
		panic("empty batch")
	}
	// Deterministically create ClientCmdIDs for all parts of the batch if
	// a CmdID is already set (otherwise, leave them empty).
	var nextID func() roachpb.ClientCmdID
	empty := roachpb.ClientCmdID{}
	if empty == ba.CmdID {
		nextID = func() roachpb.ClientCmdID {
			return empty
		}
	} else {
		rng := rand.New(rand.NewSource(ba.CmdID.Random))
		id := ba.CmdID
		nextID = func() roachpb.ClientCmdID {
			curID := id             // copy
			id.Random = rng.Int63() // adjust for next call
			return curID
		}
	}
	parts := ba.Split()
	var rplChunks []*roachpb.BatchResponse
	for _, part := range parts {
		ba.Requests = part
		ba.CmdID = nextID()
		rpl, err := cs.f(ctx, ba)
		if err != nil {
			return nil, err
		}
		// Propagate transaction from last reply to next request. The final
		// update is taken and put into the response's main header.
		ba.Txn.Update(rpl.Header().Txn)
		rplChunks = append(rplChunks, rpl)
	}
	reply := rplChunks[0]
	for _, rpl := range rplChunks[1:] {
		reply.Responses = append(reply.Responses, rpl.Responses...)
	}
	lastHeader := rplChunks[len(rplChunks)-1].BatchResponse_Header
	reply.Error = lastHeader.Error
	reply.Timestamp = lastHeader.Timestamp
	reply.Txn = ba.Txn
	return reply, nil
}
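What distinguishes example 2 is how it derives a ClientCmdID for every chunk: the batch's own CmdID seeds a math/rand generator, so re-running the same batch regenerates exactly the same sequence of per-chunk IDs (presumably so that retried chunks are recognized as duplicates). The closure can be extracted into a small helper; makeNextID is a hypothetical name, and the snippet assumes math/rand and the roachpb package are imported.

// makeNextID reproduces the CmdID chaining from example 2 in isolation.
// The first call returns base itself; every later call returns an ID whose
// Random field was drawn from a generator seeded with base.Random, so two
// closures built from the same base hand out identical ID sequences.
func makeNextID(base roachpb.ClientCmdID) func() roachpb.ClientCmdID {
	rng := rand.New(rand.NewSource(base.Random))
	id := base
	return func() roachpb.ClientCmdID {
		cur := id               // the ID to hand out now
		id.Random = rng.Int63() // pre-compute the ID for the next call
		return cur
	}
}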
Example 3: Send
// Send implements the batch.Sender interface. It subdivides
// the Batch into batches admissible for sending (preventing certain
// illegal mixtures of requests), executes each individual part
// (which may span multiple ranges), and recombines the response.
// When the request spans ranges, it is split up and the corresponding
// ranges queried serially, in ascending order.
// In particular, the first write in a transaction may not be part of the first
// request sent. This is relevant since the first write is a BeginTransaction
// request, thus opening up a window of time during which there may be intents
// of a transaction, but no entry. Pushing such a transaction will succeed, and
// may lead to the transaction being aborted early.
func (ds *DistSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	// In the event that timestamp isn't set and read consistency isn't
	// required, set the timestamp using the local clock.
	if ba.ReadConsistency == roachpb.INCONSISTENT && ba.Timestamp.Equal(roachpb.ZeroTimestamp) {
		ba.Timestamp = ds.clock.Now()
	}
	if ba.Txn != nil && len(ba.Txn.CertainNodes.Nodes) == 0 {
		// Ensure the local NodeID is marked as free from clock offset;
		// the transaction's timestamp was taken off the local clock.
		if nDesc := ds.getNodeDescriptor(); nDesc != nil {
			// TODO(tschottdorf): bad style to assume that ba.Txn is ours.
			// No race here, but should have a better way of doing this.
			// TODO(tschottdorf): future refactoring should move this to txn
			// creation in TxnCoordSender, which is currently unaware of the
			// NodeID (and wraps *DistSender through client.Sender since it
			// also needs test compatibility with *LocalSender).
			ba.Txn.CertainNodes.Add(nDesc.NodeID)
		}
	}
	if len(ba.Requests) < 1 {
		panic("empty batch")
	}
	var rplChunks []*roachpb.BatchResponse
	parts := ba.Split(false /* don't split ET */)
	for len(parts) > 0 {
		part := parts[0]
		ba.Requests = part
		rpl, pErr, shouldSplitET := ds.sendChunk(ctx, ba)
		if shouldSplitET {
			// If we tried to send a single round-trip EndTransaction but
			// it looks like it's going to hit multiple ranges, split it
			// here and try again.
			if len(parts) != 1 {
				panic("EndTransaction not in last chunk of batch")
			}
			parts = ba.Split(true /* split ET */)
			if len(parts) != 2 {
				panic("split of final EndTransaction chunk resulted in != 2 parts")
			}
			continue
		}
		if pErr != nil {
			return nil, pErr
		}
		// Propagate transaction from last reply to next request. The final
		// update is taken and put into the response's main header.
		ba.Txn.Update(rpl.Header().Txn)
		rplChunks = append(rplChunks, rpl)
		parts = parts[1:]
	}
	reply := rplChunks[0]
	for _, rpl := range rplChunks[1:] {
		reply.Responses = append(reply.Responses, rpl.Responses...)
	}
	*reply.Header() = rplChunks[len(rplChunks)-1].BatchResponse_Header
	return reply, nil
}
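Examples 3-5 drive the same chunking loop with the two-argument form of Split. The splitET flag decides whether EndTransaction is carved into its own trailing chunk, which is what the shouldSplitET fallback above relies on: try the cheap single-chunk form first, and only re-split when the chunk turns out to span multiple ranges. The following is a hypothetical illustration of the expected part counts for a simple write-only transactional batch, assuming fmt and the roachpb package are imported and nothing else in the batch forces a split; it is not output taken from the repository.

// splitModes sketches the effect of the splitET flag on [Put, Put, EndTransaction].
func splitModes() {
	var ba roachpb.BatchRequest
	ba.Add(&roachpb.PutRequest{})
	ba.Add(&roachpb.PutRequest{})
	ba.Add(&roachpb.EndTransactionRequest{})

	coarse := ba.Split(false /* don't split ET */) // expected: 1 part holding all three requests
	fine := ba.Split(true /* split ET */)          // expected: 2 parts, EndTransaction alone in the last
	fmt.Println(len(coarse), len(fine))            // expected output: 1 2
}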
Example 4: Send
// Send implements the batch.Sender interface. It subdivides
// the Batch into batches admissible for sending (preventing certain
// illegal mixtures of requests), executes each individual part
// (which may span multiple ranges), and recombines the response.
// When the request spans ranges, it is split up and the corresponding
// ranges queried serially, in ascending order.
// In particular, the first write in a transaction may not be part of the first
// request sent. This is relevant since the first write is a BeginTransaction
// request, thus opening up a window of time during which there may be intents
// of a transaction, but no entry. Pushing such a transaction will succeed, and
// may lead to the transaction being aborted early.
func (ds *DistSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	tracing.AnnotateTrace()
	// In the event that timestamp isn't set and read consistency isn't
	// required, set the timestamp using the local clock.
	if ba.ReadConsistency == roachpb.INCONSISTENT && ba.Timestamp.Equal(hlc.ZeroTimestamp) {
		ba.Timestamp = ds.clock.Now()
	}
	if ba.Txn != nil {
		// Make a copy here since the code below modifies it in different places.
		// TODO(tschottdorf): be smarter about this - no need to do it for
		// requests that don't get split.
		txnClone := ba.Txn.Clone()
		ba.Txn = &txnClone
		if len(ba.Txn.ObservedTimestamps) == 0 {
			// Ensure the local NodeID is marked as free from clock offset;
			// the transaction's timestamp was taken off the local clock.
			if nDesc := ds.getNodeDescriptor(); nDesc != nil {
				// TODO(tschottdorf): future refactoring should move this to txn
				// creation in TxnCoordSender, which is currently unaware of the
				// NodeID (and wraps *DistSender through client.Sender since it
				// also needs test compatibility with *LocalSender).
				//
				// Taking care below to not modify any memory referenced from
				// our BatchRequest which may be shared with others.
				//
				// We already have a clone of our txn (see above), so we can
				// modify it freely.
				//
				// Zero the existing data. That makes sure that if we had
				// something of size zero but with capacity, we don't re-use the
				// existing space (which others may also use). This is just to
				// satisfy paranoia/OCD and not expected to matter in practice.
				ba.Txn.ResetObservedTimestamps()
				// OrigTimestamp is the HLC timestamp at which the Txn started, so
				// this effectively means no more uncertainty on this node.
				ba.Txn.UpdateObservedTimestamp(nDesc.NodeID, ba.Txn.OrigTimestamp)
			}
		}
	}
	if len(ba.Requests) < 1 {
		panic("empty batch")
	}
	if ba.MaxSpanRequestKeys != 0 {
		// Verify that the batch contains only specific range requests or the
		// Begin/EndTransactionRequest. Verify that a batch with a ReverseScan
		// only contains ReverseScan range requests.
		isReverse := ba.IsReverse()
		for _, req := range ba.Requests {
			inner := req.GetInner()
			switch inner.(type) {
			case *roachpb.ScanRequest, *roachpb.DeleteRangeRequest:
				// Accepted range requests. All other range requests are still
				// not supported.
				// TODO(vivek): don't enumerate all range requests.
				if isReverse {
					return nil, roachpb.NewErrorf("batch with limit contains both forward and reverse scans")
				}
			case *roachpb.BeginTransactionRequest, *roachpb.EndTransactionRequest, *roachpb.ReverseScanRequest:
				continue
			default:
				return nil, roachpb.NewErrorf("batch with limit contains %T request", inner)
			}
		}
	}
	var rplChunks []*roachpb.BatchResponse
	parts := ba.Split(false /* don't split ET */)
	if len(parts) > 1 && ba.MaxSpanRequestKeys != 0 {
		// We already verified above that the batch contains only scan requests of the same type.
		// Such a batch should never need splitting.
		panic("batch with MaxSpanRequestKeys needs splitting")
	}
	for len(parts) > 0 {
		part := parts[0]
		ba.Requests = part
		rpl, pErr, shouldSplitET := ds.sendChunk(ctx, ba)
		if shouldSplitET {
			// If we tried to send a single round-trip EndTransaction but
			// it looks like it's going to hit multiple ranges, split it
			// here and try again.
			if len(parts) != 1 {
				panic("EndTransaction not in last chunk of batch")
//......... (the rest of this example's code is omitted) .........
Example 5: Send
// Send implements the batch.Sender interface. It subdivides
// the Batch into batches admissible for sending (preventing certain
// illegal mixtures of requests), executes each individual part
// (which may span multiple ranges), and recombines the response.
// When the request spans ranges, it is split up and the corresponding
// ranges queried serially, in ascending order.
// In particular, the first write in a transaction may not be part of the first
// request sent. This is relevant since the first write is a BeginTransaction
// request, thus opening up a window of time during which there may be intents
// of a transaction, but no entry. Pushing such a transaction will succeed, and
// may lead to the transaction being aborted early.
func (ds *DistSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	tracing.AnnotateTrace()
	// In the event that timestamp isn't set and read consistency isn't
	// required, set the timestamp using the local clock.
	if ba.ReadConsistency == roachpb.INCONSISTENT && ba.Timestamp.Equal(roachpb.ZeroTimestamp) {
		ba.Timestamp = ds.clock.Now()
	}
	if ba.Txn != nil && len(ba.Txn.CertainNodes.Nodes) == 0 {
		// Ensure the local NodeID is marked as free from clock offset;
		// the transaction's timestamp was taken off the local clock.
		if nDesc := ds.getNodeDescriptor(); nDesc != nil {
			// TODO(tschottdorf): future refactoring should move this to txn
			// creation in TxnCoordSender, which is currently unaware of the
			// NodeID (and wraps *DistSender through client.Sender since it
			// also needs test compatibility with *LocalSender).
			//
			// Taking care below to not modify any memory referenced from
			// our BatchRequest which may be shared with others.
			// First, get a shallow clone of our txn (since that holds the
			// NodeList struct).
			txnShallow := *ba.Txn
			// Next, zero out the NodeList pointer. That makes sure that
			// if we had something of size zero but with capacity, we don't
			// re-use the existing space (which others may also use).
			txnShallow.CertainNodes.Nodes = nil
			txnShallow.CertainNodes.Add(nDesc.NodeID)
			ba.Txn = &txnShallow
		}
	}
	if len(ba.Requests) < 1 {
		panic("empty batch")
	}
	var rplChunks []*roachpb.BatchResponse
	parts := ba.Split(false /* don't split ET */)
	for len(parts) > 0 {
		part := parts[0]
		ba.Requests = part
		rpl, pErr, shouldSplitET := ds.sendChunk(ctx, ba)
		if shouldSplitET {
			// If we tried to send a single round-trip EndTransaction but
			// it looks like it's going to hit multiple ranges, split it
			// here and try again.
			if len(parts) != 1 {
				panic("EndTransaction not in last chunk of batch")
			}
			parts = ba.Split(true /* split ET */)
			if len(parts) != 2 {
				panic("split of final EndTransaction chunk resulted in != 2 parts")
			}
			continue
		}
		if pErr != nil {
			return nil, pErr
		}
		// Propagate transaction from last reply to next request. The final
		// update is taken and put into the response's main header.
		ba.Txn.Update(rpl.Header().Txn)
		rplChunks = append(rplChunks, rpl)
		parts = parts[1:]
	}
	reply := rplChunks[0]
	for _, rpl := range rplChunks[1:] {
		reply.Responses = append(reply.Responses, rpl.Responses...)
	}
	*reply.Header() = rplChunks[len(rplChunks)-1].BatchResponse_Header
	return reply, nil
}
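Example 5 differs from example 4 mainly in how it avoids touching shared memory: instead of a full Clone of the transaction, it takes a shallow copy and then nils out CertainNodes.Nodes before adding the local NodeID. Nilling the slice matters because a shallow struct copy still shares the slice's backing array with the original. The following self-contained program illustrates that hazard; certainNodes is a hypothetical stand-in for the node-list struct behind Txn.CertainNodes, not a type from the repository.

package main

import "fmt"

// certainNodes stands in for the node-list struct held by Txn.CertainNodes.
type certainNodes struct {
	Nodes []int32
}

func main() {
	orig := certainNodes{Nodes: make([]int32, 0, 4)} // length 0, but spare capacity
	copied := orig                                   // shallow copy: same backing array

	copied.Nodes = append(copied.Nodes, 7) // no reallocation: writes into the shared array
	orig.Nodes = append(orig.Nodes, 8)     // the original writes to the same slot
	fmt.Println(copied.Nodes[0])           // prints 8: the copy's entry was clobbered

	// Nilling the copy's slice first (as example 5 does) forces append to
	// allocate fresh memory, so the shared original is never touched.
	copied.Nodes = nil
	copied.Nodes = append(copied.Nodes, 7)
	fmt.Println(copied.Nodes[0], orig.Nodes[0]) // prints 7 8
}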