This article collects typical usage examples of the Golang function keys.Range from github.com/cockroachdb/cockroach/keys. If you are wondering what Range does, how to call it, or what real-world usage looks like, the curated examples below may help.
Fifteen code examples of the Range function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code samples.
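Before the examples, here is a minimal, self-contained sketch of a typical keys.Range call. Note that the function's signature differs across the CockroachDB revisions sampled below: older call sites receive a start/end key pair or a bare span (Examples 1, 3, 7, 13, 15), while newer ones receive a span plus an error (Examples 12 and 14). The sketch assumes the newer two-value form; it is an illustration, not code from this page.

package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/keys"
	"github.com/cockroachdb/cockroach/roachpb"
)

func main() {
	var ba roachpb.BatchRequest
	val := roachpb.MakeValueFromString("val")
	ba.Add(roachpb.NewPut(roachpb.Key("a"), val).(*roachpb.PutRequest))
	ba.Add(roachpb.NewPut(roachpb.Key("c"), val).(*roachpb.PutRequest))
	// keys.Range computes the minimal half-open key span covering every
	// request in the batch; here that is [a, c.Next()).
	rs, err := keys.Range(ba)
	if err != nil {
		panic(err)
	}
	fmt.Printf("batch spans [%s,%s)\n", rs.Key, rs.EndKey)
}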
Example 1: Send
// Send forwards the call to the single store. This is a poor man's
// version of kv.TxnCoordSender, but it serves the purposes of
// supporting tests in this package. Transactions are not supported.
// Since kv/ depends on storage/, we can't get access to a
// TxnCoordSender from here.
// TODO(tschottdorf): {kv->storage}.LocalSender
func (db *testSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
if et, ok := ba.GetArg(roachpb.EndTransaction); ok {
return nil, roachpb.NewError(util.Errorf("%s method not supported", et.Method()))
}
// Lookup range and direct request.
key, endKey := keys.Range(ba)
rng := db.store.LookupReplica(key, endKey)
if rng == nil {
return nil, roachpb.NewError(roachpb.NewRangeKeyMismatchError(key, endKey, nil))
}
ba.RangeID = rng.Desc().RangeID
replica := rng.GetReplica()
if replica == nil {
return nil, roachpb.NewError(util.Errorf("own replica missing in range"))
}
ba.Replica = *replica
br, pErr := db.store.Send(ctx, ba)
if br != nil && br.Error != nil {
panic(roachpb.ErrorUnexpectedlySet(db.store, br))
}
if pErr != nil {
return nil, pErr
}
return br, nil
}
Example 2: SendCallConverted
// SendCallConverted is a wrapper to go from the (ctx,call) interface to the
// one used by batch.Sender.
// TODO(tschottdorf): remove when new proto.Call is gone.
func SendCallConverted(sender BatchSender, ctx context.Context, call proto.Call) {
call, unwrap := MaybeWrapCall(call)
defer unwrap(call)
{
br := *call.Args.(*proto.BatchRequest)
if len(br.Requests) == 0 {
panic(br)
}
br.Key, br.EndKey = keys.Range(br)
if bytes.Equal(br.Key, proto.KeyMax) {
panic(br)
}
}
reply, err := sender.SendBatch(ctx, *call.Args.(*proto.BatchRequest))
if reply != nil {
call.Reply.Reset() // required for BatchRequest (concatenates responses otherwise)
gogoproto.Merge(call.Reply, reply)
}
if call.Reply.Header().GoError() != nil {
panic(proto.ErrorUnexpectedlySet)
}
if err != nil {
call.Reply.Header().SetGoError(err)
}
}
Example 3: Send
// Send implements the client.Sender interface. The store is looked up from the
// store map if specified by the request; otherwise, the command is being
// executed locally, and the replica is determined via lookup through each
// store's LookupRange method. The latter path is taken only by unit tests.
func (ls *Stores) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
var store *Store
var err error
// If we aren't given a Replica, we have to bend over backwards a
// little here. This case applies exclusively to unit tests.
if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
var repl *roachpb.ReplicaDescriptor
var rangeID roachpb.RangeID
rs := keys.Range(ba)
rangeID, repl, err = ls.lookupReplica(rs.Key, rs.EndKey)
if err == nil {
ba.RangeID = rangeID
ba.Replica = *repl
}
}
ctx = log.Add(ctx,
log.RangeID, ba.RangeID)
if err == nil {
store, err = ls.GetStore(ba.Replica.StoreID)
}
if err != nil {
return nil, roachpb.NewError(err)
}
sp, cleanupSp := tracing.SpanFromContext(opStores, store.Tracer(), ctx)
defer cleanupSp()
if ba.Txn != nil {
// For calls that read data within a txn, we keep track of timestamps
// observed from the various participating nodes' HLC clocks. If we have
// a timestamp on file for this Node which is smaller than MaxTimestamp,
// we can lower MaxTimestamp accordingly. If MaxTimestamp drops below
// OrigTimestamp, we effectively can't see uncertainty restarts any
// more.
// Note that it's not an issue if MaxTimestamp propagates back out to
// the client via a returned Transaction update - when updating a Txn
// from another, the larger MaxTimestamp wins.
if maxTS, ok := ba.Txn.GetObservedTimestamp(ba.Replica.NodeID); ok && maxTS.Less(ba.Txn.MaxTimestamp) {
// Copy-on-write to protect others we might be sharing the Txn with.
shallowTxn := *ba.Txn
// The uncertainty window is [OrigTimestamp, maxTS), so if that window
// is empty, there won't be any uncertainty restarts.
if !ba.Txn.OrigTimestamp.Less(maxTS) {
sp.LogEvent("read has no clock uncertainty")
}
shallowTxn.MaxTimestamp.Backward(maxTS)
ba.Txn = &shallowTxn
}
}
br, pErr := store.Send(ctx, ba)
if br != nil && br.Error != nil {
panic(roachpb.ErrorUnexpectedlySet(store, br))
}
return br, pErr
}
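The observed-timestamp block in the example above is the subtle part of this Send variant. The standalone sketch below restates the clamping rule with plain int64s in place of hlc.Timestamp (all names here are hypothetical simplifications, not CockroachDB API): lower MaxTimestamp to the timestamp already observed from the target node, on a copy of the txn, and note that once MaxTimestamp drops to OrigTimestamp or below, uncertainty restarts become impossible.

package main

import "fmt"

// txn keeps just the two timestamps that matter for uncertainty.
type txn struct {
	origTS, maxTS int64
}

// applyObserved mirrors shallowTxn.MaxTimestamp.Backward(maxTS) above:
// it returns a copy (copy-on-write, so sharers of t are unaffected)
// whose maxTS is lowered to the observed timestamp if that is smaller.
func applyObserved(t txn, observed int64) txn {
	shallow := t
	if observed < shallow.maxTS {
		shallow.maxTS = observed
	}
	return shallow
}

func main() {
	t := txn{origTS: 1000, maxTS: 3000}
	t2 := applyObserved(t, 800)
	// The uncertainty window is [origTS, maxTS); once it is empty, the
	// read can no longer hit an uncertainty restart.
	fmt.Println(t2.maxTS <= t2.origTS, t.maxTS) // true 3000 (original untouched)
}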
Example 4: Send
// Send implements the client.Sender interface. The store is looked up from the
// store map if specified by the request; otherwise, the command is being
// executed locally, and the replica is determined via lookup through each
// store's LookupRange method. The latter path is taken only by unit tests.
func (ls *Stores) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
sp := tracing.SpanFromContext(ctx)
var store *Store
var pErr *roachpb.Error
// If we aren't given a Replica, we have to bend over backwards a
// little here. This case applies exclusively to unit tests.
if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
var repl *roachpb.ReplicaDescriptor
var rangeID roachpb.RangeID
rs := keys.Range(ba)
rangeID, repl, pErr = ls.lookupReplica(rs.Key, rs.EndKey)
if pErr == nil {
ba.RangeID = rangeID
ba.Replica = *repl
}
}
ctx = log.Add(ctx,
log.RangeID, ba.RangeID)
if pErr == nil {
store, pErr = ls.GetStore(ba.Replica.StoreID)
}
var br *roachpb.BatchResponse
if pErr != nil {
return nil, pErr
}
// For calls that read data within a txn, we can avoid uncertainty
// related retries in certain situations. If the node is in
// "CertainNodes", we need not worry about uncertain reads any
// more. Setting MaxTimestamp=OrigTimestamp for the operation
// accomplishes that. See roachpb.Transaction.CertainNodes for details.
if ba.Txn != nil && ba.Txn.CertainNodes.Contains(ba.Replica.NodeID) {
// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
sp.LogEvent("read has no clock uncertainty")
// Copy-on-write to protect others we might be sharing the Txn with.
shallowTxn := *ba.Txn
// We set to OrigTimestamp because that works for both SNAPSHOT and
// SERIALIZABLE: If we used Timestamp instead, we could run into
// unnecessary retries at SNAPSHOT. For example, a SNAPSHOT txn at
// OrigTimestamp = 1000.0, Timestamp = 2000.0, MaxTimestamp = 3000.0
// will always read at 1000, so a MaxTimestamp of 2000 will still let
// it restart with uncertainty when it finds a value in (1000, 2000).
shallowTxn.MaxTimestamp = ba.Txn.OrigTimestamp
ba.Txn = &shallowTxn
}
br, pErr = store.Send(ctx, ba)
if br != nil && br.Error != nil {
panic(roachpb.ErrorUnexpectedlySet(store, br))
}
return br, pErr
}
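The comment in the example above argues that clamping to OrigTimestamp, not Timestamp, is what eliminates restarts for SNAPSHOT transactions. The sketch below replays its numeric example (OrigTimestamp = 1000, Timestamp = 2000, MaxTimestamp = 3000) with plain int64s; isUncertain and its parameters are hypothetical simplifications, not CockroachDB API.

package main

import "fmt"

// isUncertain reports whether a value written at valueTS would force an
// uncertainty restart of a read performed at readTS with the given
// MaxTimestamp, i.e. whether valueTS falls in the open window (readTS, maxTS).
func isUncertain(readTS, maxTS, valueTS int64) bool {
	return valueTS > readTS && valueTS < maxTS
}

func main() {
	// A SNAPSHOT txn always reads at OrigTimestamp = 1000.
	const readTS = 1000
	// Clamping MaxTimestamp to Timestamp (2000) still lets a value in
	// (1000, 2000) trigger a needless restart:
	fmt.Println(isUncertain(readTS, 2000, 1500)) // true
	// Clamping to OrigTimestamp (1000) empties the window entirely:
	fmt.Println(isUncertain(readTS, 1000, 1500)) // false
}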
Example 5: TestRetryOnWrongReplicaError
// TestRetryOnWrongReplicaError sets up a DistSender on a minimal gossip
// network and a mock of rpc.Send, and verifies that the DistSender correctly
// retries upon encountering a stale entry in its range descriptor cache.
func TestRetryOnWrongReplicaError(t *testing.T) {
defer leaktest.AfterTest(t)
g, s := makeTestGossip(t)
defer s()
// Updated below, after it has first been returned.
badStartKey := roachpb.RKey("m")
newRangeDescriptor := testRangeDescriptor
goodStartKey := newRangeDescriptor.StartKey
newRangeDescriptor.StartKey = badStartKey
descStale := true
var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) proto.Message, getReply func() proto.Message, _ *rpc.Context) ([]proto.Message, error) {
ba := getArgs(testAddress).(*roachpb.BatchRequest)
rs := keys.Range(*ba)
if _, ok := ba.GetArg(roachpb.RangeLookup); ok {
if !descStale && bytes.HasPrefix(rs.Key, keys.Meta2Prefix) {
t.Errorf("unexpected extra lookup for non-stale replica descriptor at %s",
rs.Key)
}
br := getReply().(*roachpb.BatchResponse)
r := &roachpb.RangeLookupResponse{}
r.Ranges = append(r.Ranges, newRangeDescriptor)
br.Add(r)
// If we just returned the stale descriptor, set up returning the
// good one next time.
if bytes.HasPrefix(rs.Key, keys.Meta2Prefix) {
if newRangeDescriptor.StartKey.Equal(badStartKey) {
newRangeDescriptor.StartKey = goodStartKey
} else {
descStale = false
}
}
return []proto.Message{br}, nil
}
// When the Scan first turns up, update the descriptor for future
// range descriptor lookups.
if !newRangeDescriptor.StartKey.Equal(goodStartKey) {
return nil, &roachpb.RangeKeyMismatchError{RequestStartKey: rs.Key.AsRawKey(),
RequestEndKey: rs.EndKey.AsRawKey()}
}
return []proto.Message{ba.CreateReply()}, nil
}
ctx := &DistSenderContext{
RPCSend: testFn,
}
ds := NewDistSender(ctx, g)
scan := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("d"), 0)
if _, err := client.SendWrapped(ds, nil, scan); err != nil {
t.Errorf("scan encountered error: %s", err)
}
}
Example 6: Send
// Send implements the client.Sender interface. The store is looked up from the
// store map if specified by the request; otherwise, the command is being
// executed locally, and the replica is determined via lookup through each
// store's LookupRange method. The latter path is taken only by unit tests.
func (ls *Stores) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
sp := tracing.SpanFromContext(ctx)
var store *Store
var pErr *roachpb.Error
// If we aren't given a Replica, we have to bend over backwards a
// little here. This case applies exclusively to unit tests.
if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
var repl *roachpb.ReplicaDescriptor
var rangeID roachpb.RangeID
rs := keys.Range(ba)
rangeID, repl, pErr = ls.lookupReplica(rs.Key, rs.EndKey)
if pErr == nil {
ba.RangeID = rangeID
ba.Replica = *repl
}
}
ctx = log.Add(ctx,
log.RangeID, ba.RangeID)
if pErr == nil {
store, pErr = ls.GetStore(ba.Replica.StoreID)
}
var br *roachpb.BatchResponse
if pErr != nil {
return nil, pErr
}
// For calls that read data within a txn, we can avoid uncertainty
// related retries in certain situations. If the node is in
// "CertainNodes", we need not worry about uncertain reads any
// more. Setting MaxTimestamp=Timestamp for the operation
// accomplishes that. See roachpb.Transaction.CertainNodes for details.
if ba.Txn != nil && ba.Txn.CertainNodes.Contains(ba.Replica.NodeID) {
// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
sp.LogEvent("read has no clock uncertainty")
// Copy-on-write to protect others we might be sharing the Txn with.
shallowTxn := *ba.Txn
shallowTxn.MaxTimestamp = ba.Txn.Timestamp
ba.Txn = &shallowTxn
}
br, pErr = store.Send(ctx, ba)
if br != nil && br.Error != nil {
panic(roachpb.ErrorUnexpectedlySet(store, br))
}
return br, pErr
}
Example 7: Send
// Send implements the client.Sender interface. The store is looked up from the
// store map if specified by the request; otherwise, the command is being
// executed locally, and the replica is determined via lookup through each
// store's LookupRange method. The latter path is taken only by unit tests.
func (ls *LocalSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
trace := tracer.FromCtx(ctx)
var store *storage.Store
var err error
// If we aren't given a Replica, we have to bend over backwards a
// little here. This case applies exclusively to unit tests.
if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
var repl *roachpb.ReplicaDescriptor
var rangeID roachpb.RangeID
key, endKey := keys.Range(ba)
rangeID, repl, err = ls.lookupReplica(key, endKey)
if err == nil {
ba.RangeID = rangeID
ba.Replica = *repl
}
}
ctx = log.Add(ctx,
log.RangeID, ba.RangeID)
if err == nil {
store, err = ls.GetStore(ba.Replica.StoreID)
}
var br *roachpb.BatchResponse
if err != nil {
return nil, roachpb.NewError(err)
}
// For calls that read data within a txn, we can avoid uncertainty
// related retries in certain situations. If the node is in
// "CertainNodes", we need not worry about uncertain reads any
// more. Setting MaxTimestamp=Timestamp for the operation
// accomplishes that. See roachpb.Transaction.CertainNodes for details.
if ba.Txn != nil && ba.Txn.CertainNodes.Contains(ba.Replica.NodeID) {
// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
trace.Event("read has no clock uncertainty")
ba.Txn.MaxTimestamp = ba.Txn.Timestamp
}
br, pErr := store.Send(ctx, ba)
if br != nil && br.Error != nil {
panic(roachpb.ErrorUnexpectedlySet(store, br))
}
return br, pErr
}
Example 8: TestSequenceUpdateOnMultiRangeQueryLoop
// TestSequenceUpdateOnMultiRangeQueryLoop reproduces #3206 and
// verifies that the sequence is updated in the DistSender
// multi-range-query loop.
//
// More specifically, the issue was that DistSender might send
// multiple batch requests to the same replica when it finds a
// post-split range descriptor in the cache while the split has not
// yet been fully completed. By giving a higher sequence to the second
// request, we can avoid an infinite txn restart error (otherwise
// caused by hitting the sequence cache).
func TestSequenceUpdateOnMultiRangeQueryLoop(t *testing.T) {
defer leaktest.AfterTest(t)
g, s := makeTestGossip(t)
defer s()
if err := g.SetNodeDescriptor(&roachpb.NodeDescriptor{NodeID: 1}); err != nil {
t.Fatal(err)
}
nd := &roachpb.NodeDescriptor{
NodeID: roachpb.NodeID(1),
Address: util.MakeUnresolvedAddr(testAddress.Network(), testAddress.String()),
}
if err := g.AddInfoProto(gossip.MakeNodeIDKey(roachpb.NodeID(1)), nd, time.Hour); err != nil {
t.Fatal(err)
}
// Fill mockRangeDescriptorDB with two descriptors.
var descriptor1 = roachpb.RangeDescriptor{
RangeID: 1,
StartKey: roachpb.RKeyMin,
EndKey: roachpb.RKey("b"),
Replicas: []roachpb.ReplicaDescriptor{
{
NodeID: 1,
StoreID: 1,
},
},
}
var descriptor2 = roachpb.RangeDescriptor{
RangeID: 2,
StartKey: roachpb.RKey("b"),
EndKey: roachpb.RKey("c"),
Replicas: []roachpb.ReplicaDescriptor{
{
NodeID: 1,
StoreID: 1,
},
},
}
descDB := mockRangeDescriptorDB(func(key roachpb.RKey, _, _ bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
desc := descriptor1
if key.Equal(roachpb.RKey("b")) {
desc = descriptor2
}
return []roachpb.RangeDescriptor{desc}, nil
})
// Define our rpcSend stub which checks the span of the batch
// requests. The first request should be the point request on
// "a". The second request should be on "b". The sequence of the
// second request will be incremented by one from that of the
// first request.
first := true
var firstSequence uint32
var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) proto.Message, getReply func() proto.Message, _ *rpc.Context) ([]proto.Message, error) {
if method != "Node.Batch" {
return nil, util.Errorf("unexpected method %v", method)
}
ba := getArgs(testAddress).(*roachpb.BatchRequest)
rs := keys.Range(*ba)
if first {
if !(rs.Key.Equal(roachpb.RKey("a")) && rs.EndKey.Equal(roachpb.RKey("a").Next())) {
t.Errorf("unexpected span [%s,%s)", rs.Key, rs.EndKey)
}
first = false
firstSequence = ba.Txn.Sequence
} else {
if !(rs.Key.Equal(roachpb.RKey("b")) && rs.EndKey.Equal(roachpb.RKey("b").Next())) {
t.Errorf("unexpected span [%s,%s)", rs.Key, rs.EndKey)
}
if ba.Txn.Sequence != firstSequence+1 {
t.Errorf("unexpected sequence; expected %d, but got %d", firstSequence+1, ba.Txn.Sequence)
}
}
return []proto.Message{ba.CreateReply()}, nil
}
ctx := &DistSenderContext{
RPCSend: testFn,
RangeDescriptorDB: descDB,
}
ds := NewDistSender(ctx, g)
// Send a batch request containing two puts.
var ba roachpb.BatchRequest
ba.Txn = &roachpb.Transaction{Name: "test"}
val := roachpb.MakeValueFromString("val")
ba.Add(roachpb.NewPut(roachpb.Key("a"), val).(*roachpb.PutRequest))
//......... the rest of the code is omitted here .........
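The property the stub enforces can be stated compactly: the multi-range loop bumps the transaction's sequence exactly once per chunk sent, not once per RPC attempt. A toy sketch of that invariant follows (chunkSender and its fields are hypothetical, standing in for ba.SetNewRequest as seen in Examples 12 and 14):

package main

import "fmt"

// chunkSender models the DistSender multi-range loop's sequence handling.
type chunkSender struct{ seq uint32 }

// sendChunk bumps the sequence exactly once before a chunk goes out.
func (s *chunkSender) sendChunk() uint32 {
	s.seq++
	return s.seq
}

func main() {
	var s chunkSender
	first := s.sendChunk()         // the point request on "a"
	second := s.sendChunk()        // the request on "b"
	fmt.Println(second == first+1) // true: what the test asserts
}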
Example 9: TestTruncateWithSpanAndDescriptor
// TestTruncateWithSpanAndDescriptor verifies that a batch request is truncated with a
// range span and the range of a descriptor found in cache.
func TestTruncateWithSpanAndDescriptor(t *testing.T) {
defer leaktest.AfterTest(t)
g, s := makeTestGossip(t)
defer s()
g.SetNodeID(1)
if err := g.SetNodeDescriptor(&roachpb.NodeDescriptor{NodeID: 1}); err != nil {
t.Fatal(err)
}
nd := &roachpb.NodeDescriptor{
NodeID: roachpb.NodeID(1),
Address: util.MakeUnresolvedAddr(testAddress.Network(), testAddress.String()),
}
if err := g.AddInfoProto(gossip.MakeNodeIDKey(roachpb.NodeID(1)), nd, time.Hour); err != nil {
t.Fatal(err)
}
// Fill mockRangeDescriptorDB with two descriptors. When a
// range descriptor is looked up by key "b", return the second
// descriptor whose range is ["a", "c") and partially overlaps
// with the first descriptor's range.
var descriptor1 = roachpb.RangeDescriptor{
RangeID: 1,
StartKey: roachpb.RKeyMin,
EndKey: roachpb.RKey("b"),
Replicas: []roachpb.ReplicaDescriptor{
{
NodeID: 1,
StoreID: 1,
},
},
}
var descriptor2 = roachpb.RangeDescriptor{
RangeID: 2,
StartKey: roachpb.RKey("a"),
EndKey: roachpb.RKey("c"),
Replicas: []roachpb.ReplicaDescriptor{
{
NodeID: 1,
StoreID: 1,
},
},
}
descDB := mockRangeDescriptorDB(func(key roachpb.RKey, _, _ bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
desc := descriptor1
if key.Equal(roachpb.RKey("b")) {
desc = descriptor2
}
return []roachpb.RangeDescriptor{desc}, nil
})
// Define our rpcSend stub which checks the span of the batch
// requests. The first request should be the point request on
// "a". The second request should be on "b".
first := true
var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) proto.Message, getReply func() proto.Message, _ *rpc.Context) ([]proto.Message, error) {
if method != "Node.Batch" {
return nil, util.Errorf("unexpected method %v", method)
}
ba := getArgs(testAddress).(*roachpb.BatchRequest)
rs := keys.Range(*ba)
if first {
if !(rs.Key.Equal(roachpb.RKey("a")) && rs.EndKey.Equal(roachpb.RKey("a").Next())) {
t.Errorf("Unexpected span [%s,%s)", rs.Key, rs.EndKey)
}
first = false
} else {
if !(rs.Key.Equal(roachpb.RKey("b")) && rs.EndKey.Equal(roachpb.RKey("b").Next())) {
t.Errorf("Unexpected span [%s,%s)", rs.Key, rs.EndKey)
}
}
batchReply := getReply().(*roachpb.BatchResponse)
reply := &roachpb.PutResponse{}
batchReply.Add(reply)
return []proto.Message{batchReply}, nil
}
ctx := &DistSenderContext{
RPCSend: testFn,
RangeDescriptorDB: descDB,
}
ds := NewDistSender(ctx, g)
// Send a batch request containing two puts. In the first
// attempt, the range of the descriptor found in the cache is
// ["a", "b"). The request is truncated to contain only the put
// on "a".
//
// In the second attempt, the range of the descriptor found in
// the cache is ["a", "c"), but the put on "a" will not be
// resent. The request is truncated to contain only the put on "b".
ba := roachpb.BatchRequest{}
ba.Txn = &roachpb.Transaction{Name: "test"}
val := roachpb.MakeValueFromString("val")
ba.Add(roachpb.NewPut(keys.RangeTreeNodeKey(roachpb.RKey("a")), val).(*roachpb.PutRequest))
ba.Add(roachpb.NewPut(keys.RangeTreeNodeKey(roachpb.RKey("b")), val).(*roachpb.PutRequest))
//......... the rest of the code is omitted here .........
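Both attempts described in the comment above are plain interval intersections: the batch's remaining span is clipped to the cached descriptor's range before being sent. A simplified sketch with raw byte-slice keys (intersect is a hypothetical helper, standing in for the rs.Intersect(desc) call used in Example 11):

package main

import (
	"bytes"
	"fmt"
)

// intersect clips the span [key, endKey) to the descriptor range
// [start, end) and returns the resulting half-open interval.
func intersect(key, endKey, start, end []byte) (k, ek []byte) {
	k, ek = key, endKey
	if bytes.Compare(start, k) > 0 {
		k = start
	}
	if bytes.Compare(end, ek) < 0 {
		ek = end
	}
	return k, ek
}

func main() {
	// First attempt: batch spans [a, b\x00), descriptor covers [KeyMin, b)
	// -> only the put on "a" survives truncation.
	k, ek := intersect([]byte("a"), []byte("b\x00"), nil, []byte("b"))
	fmt.Printf("first attempt sends [%q,%q)\n", k, ek)
	// Second attempt: descriptor covers [a, c) and "a" is already done
	// -> only the put on "b" goes out.
	k, ek = intersect([]byte("b"), []byte("b\x00"), []byte("a"), []byte("c"))
	fmt.Printf("second attempt sends [%q,%q)\n", k, ek)
}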
Example 10: TestMultiRangeMergeStaleDescriptor
// TestMultiRangeMergeStaleDescriptor simulates the situation in which the
// DistSender executes a multi-range scan which encounters the stale descriptor
// of a range which has since incorporated its right neighbor by means of a
// merge. It is verified that the DistSender scans the correct keyrange exactly
// once.
func TestMultiRangeMergeStaleDescriptor(t *testing.T) {
defer leaktest.AfterTest(t)
g, s := makeTestGossip(t)
defer s()
// Assume we have two ranges, [a-b) and [b-KeyMax).
merged := false
// The stale first range descriptor which is unaware of the merge.
var FirstRange = roachpb.RangeDescriptor{
RangeID: 1,
StartKey: roachpb.RKey("a"),
EndKey: roachpb.RKey("b"),
Replicas: []roachpb.ReplicaDescriptor{
{
NodeID: 1,
StoreID: 1,
},
},
}
// The merged descriptor, which will be looked up after having processed
// the stale range [a,b).
var mergedRange = roachpb.RangeDescriptor{
RangeID: 1,
StartKey: roachpb.RKey("a"),
EndKey: roachpb.RKeyMax,
Replicas: []roachpb.ReplicaDescriptor{
{
NodeID: 1,
StoreID: 1,
},
},
}
// Assume we have two key-value pairs, a=1 and c=2.
existingKVs := []roachpb.KeyValue{
{Key: roachpb.Key("a"), Value: roachpb.MakeValueFromString("1")},
{Key: roachpb.Key("c"), Value: roachpb.MakeValueFromString("2")},
}
var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) proto.Message, getReply func() proto.Message, _ *rpc.Context) ([]proto.Message, error) {
if method != "Node.Batch" {
t.Fatalf("unexpected method:%s", method)
}
ba := getArgs(testAddress).(*roachpb.BatchRequest)
rs := keys.Range(*ba)
batchReply := getReply().(*roachpb.BatchResponse)
reply := &roachpb.ScanResponse{}
batchReply.Add(reply)
results := []roachpb.KeyValue{}
for _, curKV := range existingKVs {
if rs.Key.Less(keys.Addr(curKV.Key).Next()) && keys.Addr(curKV.Key).Less(rs.EndKey) {
results = append(results, curKV)
}
}
reply.Rows = results
return []proto.Message{batchReply}, nil
}
ctx := &DistSenderContext{
RPCSend: testFn,
RangeDescriptorDB: mockRangeDescriptorDB(func(key roachpb.RKey, _, _ bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
if !merged {
// Assume a range merge operation happened.
merged = true
return []roachpb.RangeDescriptor{FirstRange}, nil
}
return []roachpb.RangeDescriptor{mergedRange}, nil
}),
}
ds := NewDistSender(ctx, g)
scan := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("d"), 10).(*roachpb.ScanRequest)
// Set the Txn info to avoid an OpRequiresTxnError.
reply, err := client.SendWrappedWith(ds, nil, roachpb.Header{
Txn: &roachpb.Transaction{},
}, scan)
if err != nil {
t.Fatalf("scan encountered error: %s", err)
}
sr := reply.(*roachpb.ScanResponse)
if !reflect.DeepEqual(existingKVs, sr.Rows) {
t.Fatalf("expect get %v, actual get %v", existingKVs, sr.Rows)
}
}
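The mock sender above decides membership with rs.Key.Less(keys.Addr(curKV.Key).Next()) && keys.Addr(curKV.Key).Less(rs.EndKey), which is just half-open interval containment: start <= k < end. A minimal restatement over raw bytes (contains is a hypothetical helper, not CockroachDB API):

package main

import (
	"bytes"
	"fmt"
)

// contains reports whether k lies in the half-open span [start, end).
func contains(start, end, k []byte) bool {
	return bytes.Compare(start, k) <= 0 && bytes.Compare(k, end) < 0
}

func main() {
	// After the merge, [a,d) is scanned exactly once; a=1 and c=2 match,
	// while the exclusive end key "d" does not.
	for _, k := range []string{"a", "c", "d"} {
		fmt.Printf("%q in [a,d): %t\n", k, contains([]byte("a"), []byte("d"), []byte(k)))
	}
}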
Example 11: sendChunk
// sendChunk is in charge of sending an "admissible" piece of batch, i.e. one
// which doesn't need to be subdivided further before going to a range (so no
// mixing of forward and reverse scans, etc). The parameters and return values
// correspond to client.Sender with the exception of the returned boolean,
// which is true to indicate that the caller should retry but needs to send
// EndTransaction in a separate request.
func (ds *DistSender) sendChunk(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error, bool) {
isReverse := ba.IsReverse()
trace := tracer.FromCtx(ctx)
// The minimal key range encompassing all requests contained within.
// Local addressing has already been resolved.
// TODO(tschottdorf): consider rudimentary validation of the batch here
// (for example, non-range requests with EndKey, or empty key ranges).
rs := keys.Range(ba)
var br *roachpb.BatchResponse
// Send the request to one range per iteration.
for {
considerIntents := false
var curReply *roachpb.BatchResponse
var desc *roachpb.RangeDescriptor
var needAnother bool
var pErr *roachpb.Error
for r := retry.Start(ds.rpcRetryOptions); r.Next(); {
// Get range descriptor (or, when spanning range, descriptors). Our
// error handling below may clear them on certain errors, so we
// refresh (likely from the cache) on every retry.
descDone := trace.Epoch("meta descriptor lookup")
var evictDesc func()
desc, needAnother, evictDesc, pErr = ds.getDescriptors(rs, considerIntents, isReverse)
descDone()
// getDescriptors may fail retryably if the first range isn't
// available via Gossip.
if pErr != nil {
if pErr.Retryable {
if log.V(1) {
log.Warning(pErr)
}
continue
}
break
}
if needAnother && br == nil {
// TODO(tschottdorf): we should have a mechanism for discovering
// range merges (descriptor staleness will mostly go unnoticed),
// or we'll be turning single-range queries into multi-range
// queries for no good reason.
// If there's no transaction and op spans ranges, possibly
// re-run as part of a transaction for consistency. The
// case where we don't need to re-run is if the read
// consistency is not required.
if ba.Txn == nil && ba.IsPossibleTransaction() &&
ba.ReadConsistency != roachpb.INCONSISTENT {
return nil, roachpb.NewError(&roachpb.OpRequiresTxnError{}), false
}
// If the batch contains more than just an EndTransaction but ends
// with one, we want the caller to come back with the EndTransaction
// in a separate call.
if l := len(ba.Requests) - 1; l > 0 && ba.Requests[l].GetInner().Method() == roachpb.EndTransaction {
return nil, roachpb.NewError(errors.New("cannot send 1PC txn to multiple ranges")), true /* shouldSplitET */
}
}
// It's possible that the returned descriptor misses parts of the
// keys it's supposed to scan after it's truncated to match the
// descriptor. Example revscan [a,g), first desc lookup for "g"
// returns descriptor [c,d) -> [d,g) is never scanned.
// We evict and retry in such a case.
if (isReverse && !desc.ContainsKeyRange(desc.StartKey, rs.EndKey)) || (!isReverse && !desc.ContainsKeyRange(rs.Key, desc.EndKey)) {
evictDesc()
continue
}
curReply, pErr = func() (*roachpb.BatchResponse, *roachpb.Error) {
// Truncate the request to our current key range.
intersected, iErr := rs.Intersect(desc)
if iErr != nil {
return nil, roachpb.NewError(iErr)
}
truncBA, numActive, trErr := truncate(ba, intersected)
if numActive == 0 && trErr == nil {
// This shouldn't happen in the wild, but some tests
// exercise it.
return nil, roachpb.NewErrorf("truncation resulted in empty batch on [%s,%s): %s",
rs.Key, rs.EndKey, ba)
}
if trErr != nil {
return nil, roachpb.NewError(trErr)
}
return ds.sendSingleRange(trace, truncBA, desc)
}()
// If sending succeeded, break this loop.
if pErr == nil {
break
}
//......... the rest of the code is omitted here .........
Example 12: sendChunk
// sendChunk is in charge of sending an "admissible" piece of batch, i.e. one
// which doesn't need to be subdivided further before going to a range (so no
// mixing of forward and reverse scans, etc). The parameters and return values
// correspond to client.Sender with the exception of the returned boolean,
// which is true to indicate that the caller should retry but needs to send
// EndTransaction in a separate request.
func (ds *DistSender) sendChunk(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error, bool) {
isReverse := ba.IsReverse()
// TODO(radu): when contexts are properly plumbed, we should be able to get
// the tracer from ctx, not from the DistSender.
ctx, cleanup := tracing.EnsureContext(ctx, tracing.TracerFromCtx(ds.Ctx))
defer cleanup()
// The minimal key range encompassing all requests contained within.
// Local addressing has already been resolved.
// TODO(tschottdorf): consider rudimentary validation of the batch here
// (for example, non-range requests with EndKey, or empty key ranges).
rs, err := keys.Range(ba)
if err != nil {
return nil, roachpb.NewError(err), false
}
var br *roachpb.BatchResponse
// Send the request to one range per iteration.
for {
// Increase the sequence counter only once before sending RPCs to
// the ranges involved in this chunk of the batch (as opposed to for
// each RPC individually). On RPC errors, there's no guarantee that
// the request hasn't made its way to the target regardless of the
// error; we'd like the second execution to be caught by the sequence
// cache if that happens. There is a small chance that we address
// a range twice in this chunk (stale/suboptimal descriptors due to
// splits/merges) which leads to a transaction retry.
// TODO(tschottdorf): it's possible that if we don't evict from the
// cache we could be in for a busy loop.
ba.SetNewRequest()
var curReply *roachpb.BatchResponse
var desc *roachpb.RangeDescriptor
var evictToken *evictionToken
var needAnother bool
var pErr *roachpb.Error
var finished bool
var numAttempts int
for r := retry.StartWithCtx(ctx, ds.rpcRetryOptions); r.Next(); {
numAttempts++
{
const magicLogCurAttempt = 20
var seq int32
if ba.Txn != nil {
seq = ba.Txn.Sequence
}
if numAttempts%magicLogCurAttempt == 0 || seq%magicLogCurAttempt == 0 {
// Log a message if a request appears to get stuck for a long
// time or, potentially, forever. See #8975.
// The local counter captures this loop here; the Sequence number
// should capture anything higher up (as it needs to be
// incremented every time this method is called).
log.Warningf(
ctx,
"%d retries for an RPC at sequence %d, last error was: %s, remaining key ranges %s: %s",
numAttempts, seq, pErr, rs, ba,
)
}
}
// Get range descriptor (or, when spanning range, descriptors). Our
// error handling below may clear them on certain errors, so we
// refresh (likely from the cache) on every retry.
log.Trace(ctx, "meta descriptor lookup")
var err error
desc, needAnother, evictToken, err = ds.getDescriptors(ctx, rs, evictToken, isReverse)
// getDescriptors may fail retryably if, for example, the first
// range isn't available via Gossip. Assume that all errors at
// this level are retryable. Non-retryable errors would be for
// things like malformed requests which we should have checked
// for before reaching this point.
if err != nil {
log.Trace(ctx, "range descriptor lookup failed: "+err.Error())
if log.V(1) {
log.Warning(ctx, err)
}
pErr = roachpb.NewError(err)
continue
}
if needAnother && br == nil {
// TODO(tschottdorf): we should have a mechanism for discovering
// range merges (descriptor staleness will mostly go unnoticed),
// or we'll be turning single-range queries into multi-range
// queries for no good reason.
// If there's no transaction and op spans ranges, possibly
// re-run as part of a transaction for consistency. The
// case where we don't need to re-run is if the read
// consistency is not required.
if ba.Txn == nil && ba.IsPossibleTransaction() &&
//......... the rest of the code is omitted here .........
Example 13: sendChunk
// sendChunk is in charge of sending an "admissible" piece of batch, i.e. one
// which doesn't need to be subdivided further before going to a range (so no
// mixing of forward and reverse scans, etc).
func (ds *DistSender) sendChunk(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, error) {
// TODO(tschottdorf): prepare for removing Key and EndKey from BatchRequest,
// making sure that anything that relies on them goes bust.
ba.Key, ba.EndKey = nil, nil
isReverse := ba.IsReverse()
trace := tracer.FromCtx(ctx)
// The minimal key range encompassing all requests contained within.
// Local addressing has already been resolved.
// TODO(tschottdorf): consider rudimentary validation of the batch here
// (for example, non-range requests with EndKey, or empty key ranges).
from, to := keys.Range(ba)
var br *proto.BatchResponse
// Send the request to one range per iteration.
for {
options := lookupOptions{
useReverseScan: isReverse,
}
var curReply *proto.BatchResponse
var desc *proto.RangeDescriptor
var needAnother bool
var err error
for r := retry.Start(ds.rpcRetryOptions); r.Next(); {
// Get range descriptor (or, when spanning range, descriptors). Our
// error handling below may clear them on certain errors, so we
// refresh (likely from the cache) on every retry.
descDone := trace.Epoch("meta descriptor lookup")
var evictDesc func()
desc, needAnother, evictDesc, err = ds.getDescriptors(from, to, options)
descDone()
// getDescriptors may fail retryably if the first range isn't
// available via Gossip.
if err != nil {
if rErr, ok := err.(retry.Retryable); ok && rErr.CanRetry() {
if log.V(1) {
log.Warning(err)
}
continue
}
break
}
// If there's no transaction and op spans ranges, possibly
// re-run as part of a transaction for consistency. The
// case where we don't need to re-run is if the read
// consistency is not required.
if needAnother && ba.Txn == nil && ba.IsRange() &&
ba.ReadConsistency != proto.INCONSISTENT {
return nil, &proto.OpRequiresTxnError{}
}
// It's possible that the returned descriptor misses parts of the
// keys it's supposed to scan after it's truncated to match the
// descriptor. Example revscan [a,g), first desc lookup for "g"
// returns descriptor [c,d) -> [d,g) is never scanned.
// We evict and retry in such a case.
if (isReverse && !desc.ContainsKeyRange(desc.StartKey, to)) || (!isReverse && !desc.ContainsKeyRange(from, desc.EndKey)) {
evictDesc()
continue
}
curReply, err = func() (*proto.BatchResponse, error) {
// Truncate the request to our current key range.
untruncate, numActive, trErr := truncate(&ba, desc, from, to)
if numActive == 0 {
untruncate()
// This shouldn't happen in the wild, but some tests
// exercise it.
return nil, util.Errorf("truncation resulted in empty batch on [%s,%s): %s",
from, to, ba)
}
defer untruncate()
if trErr != nil {
return nil, trErr
}
// TODO(tschottdorf): make key range on batch redundant. The
// requests within dictate it anyways.
ba.Key, ba.EndKey = keys.Range(ba)
reply, err := ds.sendAttempt(trace, ba, desc)
ba.Key, ba.EndKey = nil, nil
if err != nil {
if log.V(0 /* TODO(tschottdorf): 1 */) {
log.Warningf("failed to invoke %s: %s", ba, err)
}
}
return reply, err
}()
// If sending succeeded, break this loop.
if err == nil {
break
}
//......... the rest of the code is omitted here .........
Example 14: sendChunk
// sendChunk is in charge of sending an "admissible" piece of batch, i.e. one
// which doesn't need to be subdivided further before going to a range (so no
// mixing of forward and reverse scans, etc). The parameters and return values
// correspond to client.Sender with the exception of the returned boolean,
// which is true to indicate that the caller should retry but needs to send
// EndTransaction in a separate request.
func (ds *DistSender) sendChunk(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error, bool) {
isReverse := ba.IsReverse()
ctx, cleanup := tracing.EnsureContext(ctx, ds.Tracer)
defer cleanup()
// The minimal key range encompassing all requests contained within.
// Local addressing has already been resolved.
// TODO(tschottdorf): consider rudimentary validation of the batch here
// (for example, non-range requests with EndKey, or empty key ranges).
rs, err := keys.Range(ba)
if err != nil {
return nil, roachpb.NewError(err), false
}
var br *roachpb.BatchResponse
// Send the request to one range per iteration.
for {
// Increase the sequence counter only once before sending RPCs to
// the ranges involved in this chunk of the batch (as opposed to for
// each RPC individually). On RPC errors, there's no guarantee that
// the request hasn't made its way to the target regardless of the
// error; we'd like the second execution to be caught by the sequence
// cache if that happens. There is a small chance that that we address
// a range twice in this chunk (stale/suboptimal descriptors due to
// splits/merges) which leads to a transaction retry.
// TODO(tschottdorf): it's possible that if we don't evict from the
// cache we could be in for a busy loop.
ba.SetNewRequest()
var curReply *roachpb.BatchResponse
var desc *roachpb.RangeDescriptor
var evictToken evictionToken
var needAnother bool
var pErr *roachpb.Error
var finished bool
for r := retry.Start(ds.rpcRetryOptions); r.Next(); {
// Get range descriptor (or, when spanning range, descriptors). Our
// error handling below may clear them on certain errors, so we
// refresh (likely from the cache) on every retry.
log.Trace(ctx, "meta descriptor lookup")
desc, needAnother, evictToken, pErr = ds.getDescriptors(rs, evictToken, isReverse)
// getDescriptors may fail retryably if the first range isn't
// available via Gossip.
if pErr != nil {
log.Trace(ctx, "range descriptor lookup failed: "+pErr.String())
if pErr.Retryable {
if log.V(1) {
log.Warning(pErr)
}
continue
}
break
} else {
log.Trace(ctx, "looked up range descriptor")
}
if needAnother && br == nil {
// TODO(tschottdorf): we should have a mechanism for discovering
// range merges (descriptor staleness will mostly go unnoticed),
// or we'll be turning single-range queries into multi-range
// queries for no good reason.
// If there's no transaction and op spans ranges, possibly
// re-run as part of a transaction for consistency. The
// case where we don't need to re-run is if the read
// consistency is not required.
if ba.Txn == nil && ba.IsPossibleTransaction() &&
ba.ReadConsistency != roachpb.INCONSISTENT {
return nil, roachpb.NewError(&roachpb.OpRequiresTxnError{}), false
}
// If the batch contains more than just an EndTransaction but ends
// with one, we want the caller to come back with the EndTransaction
// in a separate call.
if l := len(ba.Requests) - 1; l > 0 && ba.Requests[l].GetInner().Method() == roachpb.EndTransaction {
return nil, roachpb.NewError(errors.New("cannot send 1PC txn to multiple ranges")), true /* shouldSplitET */
}
}
// It's possible that the returned descriptor misses parts of the
// keys it's supposed to scan after it's truncated to match the
// descriptor. Example revscan [a,g), first desc lookup for "g"
// returns descriptor [c,d) -> [d,g) is never scanned.
// We evict and retry in such a case.
includesFrontOfCurSpan := func(rd *roachpb.RangeDescriptor) bool {
if isReverse {
// This approach is needed because rs.EndKey is exclusive.
return rd.ContainsKeyRange(rd.StartKey, rs.EndKey)
}
return rd.ContainsKey(rs.Key)
}
if !includesFrontOfCurSpan(desc) {
if err := evictToken.Evict(); err != nil {
//......... the rest of the code is omitted here .........
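The includesFrontOfCurSpan closure defined near the end of the (truncated) example above encodes an asymmetry worth spelling out: a forward scan only needs the descriptor to contain rs.Key, while a reverse scan works backwards from the exclusive rs.EndKey, so the descriptor must reach all the way up to it. A byte-slice sketch of the same check (includesFront and its parameters are hypothetical simplifications, not CockroachDB API):

package main

import (
	"bytes"
	"fmt"
)

// includesFront reports whether a descriptor covering [start, end) contains
// the front of the remaining span [key, endKey) for the given direction.
func includesFront(start, end, key, endKey []byte, isReverse bool) bool {
	if isReverse {
		// endKey is exclusive, so the front of a reverse scan sits just
		// below endKey; the descriptor must satisfy end >= endKey.
		return bytes.Compare(endKey, end) <= 0
	}
	return bytes.Compare(start, key) <= 0 && bytes.Compare(key, end) < 0
}

func main() {
	// The revscan [a,g) from the comment: looking up "g" returned the
	// stale descriptor [c,d), which misses [d,g) -> evict and retry.
	fmt.Println(includesFront([]byte("c"), []byte("d"), []byte("a"), []byte("g"), true)) // false
}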
Example 15: newRSpan
// newRSpan returns an rSpan encompassing all the keys in the batch request.
func newRSpan(ba roachpb.BatchRequest) rSpan {
key, endKey := keys.Range(ba)
return rSpan{key: key, endKey: endKey}
}