This page collects typical usage examples of the Golang type RangeDescriptor from github.com/cockroachdb/cockroach/proto. If you have been wondering what RangeDescriptor is, what it is for, or how to use it, the hand-picked examples below should help.
The following shows 9 code examples of RangeDescriptor, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
Example 1: TestSendRPCRetry
// TestSendRPCRetry verifies that when sendRPC fails on the first address
// but succeeds on the second, the reply from the second address is
// returned successfully.
func TestSendRPCRetry(t *testing.T) {
    defer leaktest.AfterTest(t)
    g, s := makeTestGossip(t)
    defer s()
    if err := g.SetNodeDescriptor(&proto.NodeDescriptor{NodeID: 1}); err != nil {
        t.Fatal(err)
    }
    // Fill the RangeDescriptor with 2 replicas.
    var descriptor = proto.RangeDescriptor{
        RaftID:   1,
        StartKey: proto.Key("a"),
        EndKey:   proto.Key("z"),
    }
    for i := 1; i <= 2; i++ {
        addr := util.MakeUnresolvedAddr("tcp", fmt.Sprintf("node%d", i))
        nd := &proto.NodeDescriptor{
            NodeID: proto.NodeID(i),
            Address: proto.Addr{
                Network: addr.Network(),
                Address: addr.String(),
            },
        }
        if err := g.AddInfo(gossip.MakeNodeIDKey(proto.NodeID(i)), nd, time.Hour); err != nil {
            t.Fatal(err)
        }
        descriptor.Replicas = append(descriptor.Replicas, proto.Replica{
            NodeID:  proto.NodeID(i),
            StoreID: proto.StoreID(i),
        })
    }
    // Define our rpcSend stub which returns success on the second address.
    var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) interface{}, getReply func() interface{}, _ *rpc.Context) ([]interface{}, error) {
        if method == "Node.Scan" {
            // The reply from the first address fails.
            _ = getReply()
            // The reply from the second address succeeds.
            reply := getReply()
            reply.(*proto.ScanResponse).Rows = append([]proto.KeyValue{}, proto.KeyValue{Key: proto.Key("b"), Value: proto.Value{}})
            return []interface{}{reply}, nil
        }
        return nil, util.Errorf("unexpected method %v", method)
    }
    ctx := &DistSenderContext{
        rpcSend: testFn,
        rangeDescriptorDB: mockRangeDescriptorDB(func(_ proto.Key, _ lookupOptions) ([]proto.RangeDescriptor, error) {
            return []proto.RangeDescriptor{descriptor}, nil
        }),
    }
    ds := NewDistSender(ctx, g)
    call := proto.ScanCall(proto.Key("a"), proto.Key("d"), 1)
    sr := call.Reply.(*proto.ScanResponse)
    ds.Send(context.Background(), call)
    if err := sr.GoError(); err != nil {
        t.Fatal(err)
    }
    if l := len(sr.Rows); l != 1 {
        t.Fatalf("expected 1 row; got %d", l)
    }
}
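A possible variation on the test harness above: since mockRangeDescriptorDB is just a function, it can hand back different descriptors per key to simulate a split keyspace. The fragment below is a sketch only; the split key "m", both descriptors, and the db variable are invented for illustration and do not appear in the test above.

splitKey := proto.Key("m")
first := proto.RangeDescriptor{RaftID: 1, StartKey: proto.Key("a"), EndKey: splitKey}
second := proto.RangeDescriptor{RaftID: 2, StartKey: splitKey, EndKey: proto.Key("z")}
db := mockRangeDescriptorDB(func(key proto.Key, _ lookupOptions) ([]proto.RangeDescriptor, error) {
    // Return the descriptor whose range contains the lookup key.
    if key.Less(splitKey) {
        return []proto.RangeDescriptor{first}, nil
    }
    return []proto.RangeDescriptor{second}, nil
})
_ = db // would be wired into DistSenderContext.rangeDescriptorDB as above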
Example 2: Send
// Send implements the client.Sender interface. It verifies
// permissions and looks up the appropriate range based on the
// supplied key and sends the RPC according to the specified options.
//
// If the request spans multiple ranges (which is possible for Scan or
// DeleteRange requests), Send sends requests to the individual ranges
// sequentially and combines the results transparently.
//
// This may temporarily adjust the request headers, so the proto.Call
// must not be used concurrently until Send has returned.
func (ds *DistSender) Send(_ context.Context, call proto.Call) {
    args := call.Args
    finalReply := call.Reply
    // Verify permissions.
    if err := ds.verifyPermissions(call.Args); err != nil {
        call.Reply.Header().SetGoError(err)
        return
    }
    // In the event that timestamp isn't set and read consistency isn't
    // required, set the timestamp using the local clock.
    if args.Header().ReadConsistency == proto.INCONSISTENT && args.Header().Timestamp.Equal(proto.ZeroTimestamp) {
        // Make sure that after the call, args hasn't changed.
        defer func(timestamp proto.Timestamp) {
            args.Header().Timestamp = timestamp
        }(args.Header().Timestamp)
        args.Header().Timestamp = ds.clock.Now()
    }
    // If this is a bounded request, we will change its bound as we receive
    // replies. This undoes that when we return.
    boundedArgs, argsBounded := args.(proto.Bounded)
    if argsBounded {
        defer func(bound int64) {
            boundedArgs.SetBound(bound)
        }(boundedArgs.GetBound())
    }
    defer func(key proto.Key) {
        args.Header().Key = key
    }(args.Header().Key)
    // Retry logic for lookup of range by key and RPCs to range replicas.
    curReply := finalReply
    for {
        call.Reply = curReply
        curReply.Header().Reset()
        var desc, descNext *proto.RangeDescriptor
        var err error
        for r := retry.Start(ds.rpcRetryOptions); r.Next(); {
            // Get range descriptor (or, when spanning range, descriptors).
            // sendAttempt below may clear them on certain errors, so we
            // refresh (likely from the cache) on every retry.
            desc, descNext, err = ds.getDescriptors(call)
            // getDescriptors may fail retryably if the first range isn't
            // available via Gossip.
            if err != nil {
                if rErr, ok := err.(util.Retryable); ok && rErr.CanRetry() {
                    if log.V(1) {
                        log.Warning(err)
                    }
                    continue
                }
                break
            }
            err = func() error {
                // Truncate the request to our current range, making sure not to
                // touch it unless we have to (it is illegal to send EndKey on
                // commands which do not operate on ranges).
                if descNext != nil {
                    defer func(endKey proto.Key) {
                        args.Header().EndKey = endKey
                    }(args.Header().EndKey)
                    args.Header().EndKey = desc.EndKey
                }
                leader := ds.leaderCache.Lookup(proto.RaftID(desc.RaftID))
                // Try to send the call.
                replicas := newReplicaSlice(ds.gossip, desc)
                // Rearrange the replicas so that those replicas with long common
                // prefix of attributes end up first. If there's no prefix, this is a
                // no-op.
                order := ds.optimizeReplicaOrder(replicas)
                // If this request needs to go to a leader and we know who that is, move
                // it to the front.
                if !(proto.IsRead(args) && args.Header().ReadConsistency == proto.INCONSISTENT) &&
                    leader.StoreID > 0 {
                    if i := replicas.FindReplica(leader.StoreID); i >= 0 {
                        replicas.MoveToFront(i)
                        order = rpc.OrderStable
                    }
                }
                return ds.sendRPC(desc.RaftID, replicas, order, args, curReply)
            }()
// ... (remainder of this example omitted) ...
Example 3: containsKeyRange
func containsKeyRange(desc proto.RangeDescriptor, start, end proto.Key) bool {
    return desc.ContainsKeyRange(keys.KeyAddress(start), keys.KeyAddress(end))
}
Example 4: containsKey
func containsKey(desc proto.RangeDescriptor, key proto.Key) bool {
    return desc.ContainsKey(keys.KeyAddress(key))
}
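To see what these wrappers do, here is a minimal sketch (the descriptor bounds are invented for illustration): ContainsKey and ContainsKeyRange test against the half-open interval [StartKey, EndKey), and keys.KeyAddress first resolves local keys to their addresses.

desc := proto.RangeDescriptor{
    StartKey: proto.Key("a"),
    EndKey:   proto.Key("m"),
}
fmt.Println(containsKey(desc, proto.Key("c")))                      // true: "c" lies in [a, m)
fmt.Println(containsKey(desc, proto.Key("z")))                      // false: "z" is at or past EndKey
fmt.Println(containsKeyRange(desc, proto.Key("b"), proto.Key("d"))) // true: [b, d) fits inside [a, m)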
Example 5: truncate
// truncate restricts all contained requests to the given key range.
// Even on error, the returned closure must be executed; it undoes any
// truncations performed.
// First, the boundaries of the truncation are obtained: this is the
// intersection between [from,to) and the descriptor's range.
// Second, all requests contained in the batch are "truncated" to
// the resulting range, inserting NoopRequest appropriately to
// replace requests which are left without a key range to operate on.
// The number of non-noop requests after truncation is returned along
// with a closure which must be executed to undo the truncation, even
// in case of an error.
// TODO(tschottdorf): Consider returning a new BatchRequest, which has more
// overhead in the common case of a batch which never needs truncation but is
// less magical.
func truncate(br *proto.BatchRequest, desc *proto.RangeDescriptor, from, to proto.Key) (func(), int, error) {
    if !desc.ContainsKey(from) {
        from = desc.StartKey
    }
    if !desc.ContainsKeyRange(desc.StartKey, to) || to == nil {
        to = desc.EndKey
    }
    truncateOne := func(args proto.Request) (bool, []func(), error) {
        if _, ok := args.(*proto.NoopRequest); ok {
            return true, nil, nil
        }
        header := args.Header()
        if !proto.IsRange(args) {
            if len(header.EndKey) > 0 {
                return false, nil, util.Errorf("%T is not a range command, but EndKey is set", args)
            }
            if !desc.ContainsKey(keys.KeyAddress(header.Key)) {
                return true, nil, nil
            }
            return false, nil, nil
        }
        var undo []func()
        key, endKey := header.Key, header.EndKey
        keyAddr, endKeyAddr := keys.KeyAddress(key), keys.KeyAddress(endKey)
        if keyAddr.Less(from) {
            undo = append(undo, func() { header.Key = key })
            header.Key = from
            keyAddr = from
        }
        if !endKeyAddr.Less(to) {
            undo = append(undo, func() { header.EndKey = endKey })
            header.EndKey = to
            endKeyAddr = to
        }
        // Check whether the truncation has left any keys in the range. If not,
        // we need to cut it out of the request.
        return !keyAddr.Less(endKeyAddr), undo, nil
    }
    var fns []func()
    gUndo := func() {
        for _, f := range fns {
            f()
        }
    }
    var numNoop int
    for pos, arg := range br.Requests {
        omit, undo, err := truncateOne(arg.GetValue().(proto.Request))
        if omit {
            numNoop++
            nReq := &proto.RequestUnion{}
            nReq.SetValue(&proto.NoopRequest{})
            oReq := br.Requests[pos]
            br.Requests[pos] = *nReq
            posCpy := pos // for closure
            undo = append(undo, func() {
                br.Requests[posCpy] = oReq
            })
        }
        fns = append(fns, undo...)
        if err != nil {
            return gUndo, 0, err
        }
    }
    return gUndo, len(br.Requests) - numNoop, nil
}
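A sketch of the calling pattern, mirroring how sendChunk (Example 6) drives truncate. The wrapper name sendOneChunk and the *tracer.Trace parameter type are assumptions made for illustration; the key point is that the undo closure must run even on error, and a zero active-request count means nothing in the batch addresses this range.

func sendOneChunk(ds *DistSender, trace *tracer.Trace, ba proto.BatchRequest, desc *proto.RangeDescriptor, from, to proto.Key) (*proto.BatchResponse, error) {
    untruncate, numActive, err := truncate(&ba, desc, from, to)
    // Even on error, the returned closure must be executed.
    defer untruncate()
    if err != nil {
        return nil, err
    }
    if numActive == 0 {
        // Every request was replaced by a NoopRequest; nothing in this
        // batch addresses the current range.
        return nil, util.Errorf("truncation resulted in empty batch on [%s,%s)", from, to)
    }
    return ds.sendAttempt(trace, ba, desc)
}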
Example 6: sendChunk
// sendChunk is in charge of sending an "admissible" piece of a batch, i.e.
// one which doesn't need to be subdivided further before going to a range
// (so no mixing of forward and reverse scans, etc).
func (ds *DistSender) sendChunk(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, error) {
    // TODO(tschottdorf): prepare for removing Key and EndKey from BatchRequest,
    // making sure that anything that relies on them goes bust.
    ba.Key, ba.EndKey = nil, nil
    isReverse := ba.IsReverse()
    trace := tracer.FromCtx(ctx)
    // The minimal key range encompassing all requests contained within.
    // Local addressing has already been resolved.
    // TODO(tschottdorf): consider rudimentary validation of the batch here
    // (for example, non-range requests with EndKey, or empty key ranges).
    from, to := keys.Range(ba)
    var br *proto.BatchResponse
    // Send the request to one range per iteration.
    for {
        options := lookupOptions{
            useReverseScan: isReverse,
        }
        var curReply *proto.BatchResponse
        var desc *proto.RangeDescriptor
        var needAnother bool
        var err error
        for r := retry.Start(ds.rpcRetryOptions); r.Next(); {
            // Get range descriptor (or, when spanning range, descriptors). Our
            // error handling below may clear them on certain errors, so we
            // refresh (likely from the cache) on every retry.
            descDone := trace.Epoch("meta descriptor lookup")
            var evictDesc func()
            desc, needAnother, evictDesc, err = ds.getDescriptors(from, to, options)
            descDone()
            // getDescriptors may fail retryably if the first range isn't
            // available via Gossip.
            if err != nil {
                if rErr, ok := err.(retry.Retryable); ok && rErr.CanRetry() {
                    if log.V(1) {
                        log.Warning(err)
                    }
                    continue
                }
                break
            }
            // If there's no transaction and op spans ranges, possibly
            // re-run as part of a transaction for consistency. The
            // case where we don't need to re-run is if the read
            // consistency is not required.
            if needAnother && ba.Txn == nil && ba.IsRange() &&
                ba.ReadConsistency != proto.INCONSISTENT {
                return nil, &proto.OpRequiresTxnError{}
            }
            // It's possible that the returned descriptor misses parts of the
            // keys it's supposed to scan after it's truncated to match the
            // descriptor. Example: for a revscan over [a,g) whose first
            // descriptor lookup for "g" returns descriptor [c,d), the span
            // [d,g) would never be scanned.
            // We evict and retry in such a case.
            if (isReverse && !desc.ContainsKeyRange(desc.StartKey, to)) || (!isReverse && !desc.ContainsKeyRange(from, desc.EndKey)) {
                evictDesc()
                continue
            }
            curReply, err = func() (*proto.BatchResponse, error) {
                // Truncate the request to our current key range.
                untruncate, numActive, trErr := truncate(&ba, desc, from, to)
                if numActive == 0 {
                    untruncate()
                    // This shouldn't happen in the wild, but some tests
                    // exercise it.
                    return nil, util.Errorf("truncation resulted in empty batch on [%s,%s): %s",
                        from, to, ba)
                }
                defer untruncate()
                if trErr != nil {
                    return nil, trErr
                }
                // TODO(tschottdorf): make key range on batch redundant. The
                // requests within dictate it anyways.
                ba.Key, ba.EndKey = keys.Range(ba)
                reply, err := ds.sendAttempt(trace, ba, desc)
                ba.Key, ba.EndKey = nil, nil
                if err != nil {
                    if log.V(0 /* TODO(tschottdorf): 1 */) {
                        log.Warningf("failed to invoke %s: %s", ba, err)
                    }
                }
                return reply, err
            }()
            // If sending succeeded, break this loop.
            if err == nil {
                break
            }
// ... (remainder of this example omitted) ...
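keys.Range is what computes the bounds [from, to) for the whole chunk before the loop starts, so a standalone sketch may help. The scan bounds below are invented, and the embedded proto.RequestHeader field name is an assumption about this proto vintage; RequestUnion.SetValue is used as in Example 5.

var ba proto.BatchRequest
ru := proto.RequestUnion{}
ru.SetValue(&proto.ScanRequest{
    RequestHeader: proto.RequestHeader{
        Key:    proto.Key("b"),
        EndKey: proto.Key("f"),
    },
})
ba.Requests = append(ba.Requests, ru)
from, to := keys.Range(ba)
fmt.Printf("batch spans [%s,%s)\n", from, to) // expected: the minimal span [b,f)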
Example 7: sendAttempt
// sendAttempt is invoked by Send and handles retry logic and cache eviction
// for a call sent to a single range. It returns a retry status, which is
// Break on success and either Break, Continue or Reset depending on the
// error condition. This method is expected to be invoked from within a
// backoff / retry loop to retry the send repeatedly (e.g. to continue
// processing after a critical node becomes available after downtime or the
// range descriptor is refreshed via lookup).
func (ds *DistSender) sendAttempt(desc *proto.RangeDescriptor, call proto.Call) (retry.Status, error) {
    leader := ds.leaderCache.Lookup(proto.RaftID(desc.RaftID))
    // Try to send the call.
    replicas := newReplicaSlice(ds.gossip, desc)
    // Rearrange the replicas so that those replicas with long common
    // prefix of attributes end up first. If there's no prefix, this is a
    // no-op.
    order := ds.optimizeReplicaOrder(replicas)
    args := call.Args
    reply := call.Reply
    // If this request needs to go to a leader and we know who that is, move
    // it to the front.
    if !(proto.IsRead(args) && args.Header().ReadConsistency == proto.INCONSISTENT) &&
        leader.StoreID > 0 {
        if i := replicas.FindReplica(leader.StoreID); i >= 0 {
            replicas.MoveToFront(i)
            order = rpc.OrderStable
        }
    }
    err := ds.sendRPC(desc.RaftID, replicas, order, args, reply)
    if err != nil {
        // For an RPC error to occur, we must've been unable to contact any
        // replicas. In this case, likely all nodes are down (or not getting back
        // to us within a reasonable amount of time).
        // We may simply not be trying to talk to the up-to-date replicas, so
        // clearing the descriptor here should be a good idea.
        // TODO(tschottdorf): If a replica group goes dead, this will cause clients
        // to put high read pressure on the first range, so there should be some
        // rate limiting here.
        ds.rangeCache.EvictCachedRangeDescriptor(args.Header().Key, desc)
    } else {
        err = reply.Header().GoError()
    }
    if err != nil {
        if log.V(1) {
            log.Warningf("failed to invoke %s: %s", call.Method(), err)
        }
        // If retryable, allow retry. For range not found or range
        // key mismatch errors, we don't backoff on the retry,
        // but reset the backoff loop so we can retry immediately.
        switch tErr := err.(type) {
        case *proto.RangeNotFoundError, *proto.RangeKeyMismatchError:
            // Range descriptor might be out of date - evict it.
            ds.rangeCache.EvictCachedRangeDescriptor(args.Header().Key, desc)
            // On addressing errors, don't backoff; retry immediately.
            return retry.Reset, err
        case *proto.NotLeaderError:
            newLeader := tErr.GetLeader()
            // Verify that leader is a known replica according to the
            // descriptor. If not, we've got a stale replica; evict cache.
            // Next, cache the new leader.
            if newLeader != nil {
                if i, _ := desc.FindReplica(newLeader.StoreID); i == -1 {
                    if log.V(1) {
                        log.Infof("error indicates unknown leader %s, expunging descriptor %s", newLeader, desc)
                    }
                    ds.rangeCache.EvictCachedRangeDescriptor(args.Header().Key, desc)
                }
            } else {
                newLeader = &proto.Replica{}
            }
            ds.updateLeaderCache(proto.RaftID(desc.RaftID), *newLeader)
            return retry.Reset, err
        case util.Retryable:
            if tErr.CanRetry() {
                return retry.Continue, err
            }
        }
        return retry.Break, err
    }
    return retry.Break, nil
}
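Per its doc comment, sendAttempt is meant to be driven from a backoff/retry loop. The sketch below is a hypothetical driver, not the real caller: the name sendWithRetry, the unbounded loop, and the plain time.Sleep backoff are inventions (the actual code uses the retry package's loop helpers, which also bound the number of attempts). It shows how the three retry.Status values are interpreted.

func sendWithRetry(ds *DistSender, call proto.Call, backoff time.Duration) error {
    for {
        desc, _, err := ds.getDescriptors(call)
        if err != nil {
            return err
        }
        status, err := ds.sendAttempt(desc, call)
        switch status {
        case retry.Break:
            // Done: success (err == nil) or a permanent failure.
            return err
        case retry.Reset:
            // Addressing error: the stale descriptor was already evicted
            // inside sendAttempt; retry immediately with a fresh lookup.
        case retry.Continue:
            // Retryable error: back off before the next attempt.
            time.Sleep(backoff)
        }
    }
}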
Example 8: Send
// Send implements the client.Sender interface. It verifies
// permissions and looks up the appropriate range based on the
// supplied key and sends the RPC according to the specified options.
//
// If the request spans multiple ranges (which is possible for Scan or
// DeleteRange requests), Send sends requests to the individual ranges
// sequentially and combines the results transparently.
//
// This may temporarily adjust the request headers, so the proto.Call
// must not be used concurrently until Send has returned.
func (ds *DistSender) Send(ctx context.Context, call proto.Call) {
    args := call.Args
    // Verify permissions.
    if err := ds.verifyPermissions(call.Args); err != nil {
        call.Reply.Header().SetGoError(err)
        return
    }
    trace := tracer.FromCtx(ctx)
    // In the event that timestamp isn't set and read consistency isn't
    // required, set the timestamp using the local clock.
    if args.Header().ReadConsistency == proto.INCONSISTENT && args.Header().Timestamp.Equal(proto.ZeroTimestamp) {
        // Make sure that after the call, args hasn't changed.
        defer func(timestamp proto.Timestamp) {
            args.Header().Timestamp = timestamp
        }(args.Header().Timestamp)
        args.Header().Timestamp = ds.clock.Now()
    }
    // If this is a bounded request, we will change its bound as we receive
    // replies. This undoes that when we return.
    boundedArgs, argsBounded := args.(proto.Bounded)
    if argsBounded {
        defer func(bound int64) {
            boundedArgs.SetBound(bound)
        }(boundedArgs.GetBound())
    }
    _, isReverseScan := call.Args.(*proto.ReverseScanRequest)
    // Restore to the original range if the scan/reverse_scan crosses range boundaries.
    if isReverseScan {
        defer func(key proto.Key) {
            args.Header().EndKey = key
        }(args.Header().EndKey)
    } else {
        defer func(key proto.Key) {
            args.Header().Key = key
        }(args.Header().Key)
    }
    first := true
    // Retry logic for lookup of range by key and RPCs to range replicas.
    for {
        var curReply proto.Response
        var desc, descNext *proto.RangeDescriptor
        var err error
        for r := retry.Start(ds.rpcRetryOptions); r.Next(); {
            // Get range descriptor (or, when spanning range, descriptors). Our
            // error handling below may clear them on certain errors, so we
            // refresh (likely from the cache) on every retry.
            descDone := trace.Epoch("meta descriptor lookup")
            // It is safe to pass call here (with its embedded reply) because
            // the reply is only used to check that it implements
            // proto.Combinable if the request spans multiple ranges.
            desc, descNext, err = ds.getDescriptors(call)
            descDone()
            // getDescriptors may fail retryably if the first range isn't
            // available via Gossip.
            if err != nil {
                if rErr, ok := err.(retry.Retryable); ok && rErr.CanRetry() {
                    if log.V(1) {
                        log.Warning(err)
                    }
                    continue
                }
                break
            }
            // At this point reply.Header().Error may be non-nil!
            curReply, err = ds.sendAttempt(trace, args, desc)
            descKey := args.Header().Key
            if isReverseScan {
                descKey = args.Header().EndKey
            }
            if err != nil {
                trace.Event(fmt.Sprintf("send error: %T", err))
                // For an RPC error to occur, we must've been unable to contact any
                // replicas. In this case, likely all nodes are down (or not getting back
                // to us within a reasonable amount of time).
                // We may simply not be trying to talk to the up-to-date replicas, so
                // clearing the descriptor here should be a good idea.
                // TODO(tschottdorf): If a replica group goes dead, this will cause clients
                // to put high read pressure on the first range, so there should be some
                // rate limiting here.
                ds.rangeCache.EvictCachedRangeDescriptor(descKey, desc, isReverseScan)
            } else {
// ... (remainder of this example omitted) ...
Example 9: TestSendRPCOrder
// ... (earlier part of this example omitted) ...
        // Put with matching attributes but no leader.
        // Should move the two nodes matching the attributes to the front and
        // go stable.
        {
            args:  &proto.PutRequest{},
            attrs: append(nodeAttrs[5], "irrelevant"),
            // Compare only the first two resulting addresses.
            order:      rpc.OrderStable,
            expReplica: []int32{5, 4, 0, 0, 0},
        },
        // Put with matching attributes that finds the leader (node 3).
        // Should address the leader and the two nodes matching the attributes
        // (the last and second to last) in that order.
        {
            args:  &proto.PutRequest{},
            attrs: append(nodeAttrs[5], "irrelevant"),
            // Compare only the first resulting addresses as we have a leader
            // and that means we're only trying to send there.
            order:      rpc.OrderStable,
            expReplica: []int32{2, 5, 4, 0, 0},
            leader:     2,
        },
        // Inconsistent Get without matching attributes but leader (node 3). Should just
        // go random as the leader does not matter.
        {
            args:       &proto.GetRequest{},
            attrs:      []string{},
            order:      rpc.OrderRandom,
            expReplica: []int32{1, 2, 3, 4, 5},
            leader:     2,
        },
    }
    descriptor := proto.RangeDescriptor{
        StartKey: proto.KeyMin,
        EndKey:   proto.KeyMax,
        RangeID:  rangeID,
        Replicas: nil,
    }
    // Stub to be changed in each test case.
    var verifyCall func(rpc.Options, []net.Addr) error
    var testFn rpcSendFn = func(opts rpc.Options, method string,
        addrs []net.Addr, _ func(addr net.Addr) gogoproto.Message,
        getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
        if err := verifyCall(opts, addrs); err != nil {
            return nil, err
        }
        return []gogoproto.Message{getReply()}, nil
    }
    ctx := &DistSenderContext{
        RPCSend: testFn,
        RangeDescriptorDB: mockRangeDescriptorDB(func(proto.Key, lookupOptions) ([]proto.RangeDescriptor, error) {
            return []proto.RangeDescriptor{descriptor}, nil
        }),
    }
    ds := NewDistSender(ctx, g)
    for n, tc := range testCases {
        verifyCall = makeVerifier(tc.order, tc.expReplica)
        descriptor.Replicas = nil // could do this once above, but more convenient here
        for i := int32(1); i <= 5; i++ {
            addr := util.MakeUnresolvedAddr("tcp", fmt.Sprintf("node%d", i))