This article collects typical usage examples of the Golang method github.com/cockroachdb/cockroach/proto.RangeDescriptor.FindReplica. If you have been wondering what exactly RangeDescriptor.FindReplica does, how to call it, or where to find sample code, the curated examples below should help. You can also explore further usage of the method's containing type, github.com/cockroachdb/cockroach/proto.RangeDescriptor.
Four code examples of RangeDescriptor.FindReplica are shown below, sorted by popularity by default.
Example 1: Send
// Send implements the client.Sender interface. It verifies
// permissions and looks up the appropriate range based on the
// supplied key and sends the RPC according to the specified options.
//
// If the request spans multiple ranges (which is possible for Scan or
// DeleteRange requests), Send sends requests to the individual ranges
// sequentially and combines the results transparently.
//
// This may temporarily adjust the request headers, so the proto.Call
// must not be used concurrently until Send has returned.
func (ds *DistSender) Send(_ context.Context, call proto.Call) {
	args := call.Args
	finalReply := call.Reply

	// Verify permissions.
	if err := ds.verifyPermissions(call.Args); err != nil {
		call.Reply.Header().SetGoError(err)
		return
	}

	// In the event that timestamp isn't set and read consistency isn't
	// required, set the timestamp using the local clock.
	if args.Header().ReadConsistency == proto.INCONSISTENT && args.Header().Timestamp.Equal(proto.ZeroTimestamp) {
		// Make sure that after the call, args hasn't changed.
		defer func(timestamp proto.Timestamp) {
			args.Header().Timestamp = timestamp
		}(args.Header().Timestamp)
		args.Header().Timestamp = ds.clock.Now()
	}

	// If this is a bounded request, we will change its bound as we receive
	// replies. This undoes that when we return.
	boundedArgs, argsBounded := args.(proto.Bounded)
	if argsBounded {
		defer func(bound int64) {
			boundedArgs.SetBound(bound)
		}(boundedArgs.GetBound())
	}

	defer func(key proto.Key) {
		args.Header().Key = key
	}(args.Header().Key)

	// Retry logic for lookup of range by key and RPCs to range replicas.
	curReply := finalReply
	for {
		call.Reply = curReply
		curReply.Header().Reset()

		var desc, descNext *proto.RangeDescriptor
		var err error
		for r := retry.Start(ds.rpcRetryOptions); r.Next(); {
			// Get range descriptor (or, when spanning range, descriptors).
			// sendAttempt below may clear them on certain errors, so we
			// refresh (likely from the cache) on every retry.
			desc, descNext, err = ds.getDescriptors(call)
			// getDescriptors may fail retryably if the first range isn't
			// available via Gossip.
			if err != nil {
				if rErr, ok := err.(util.Retryable); ok && rErr.CanRetry() {
					if log.V(1) {
						log.Warning(err)
					}
					continue
				}
				break
			}
			err = func() error {
				// Truncate the request to our current range, making sure not to
				// touch it unless we have to (it is illegal to send EndKey on
				// commands which do not operate on ranges).
				if descNext != nil {
					defer func(endKey proto.Key) {
						args.Header().EndKey = endKey
					}(args.Header().EndKey)
					args.Header().EndKey = desc.EndKey
				}
				leader := ds.leaderCache.Lookup(proto.RaftID(desc.RaftID))
				// Try to send the call.
				replicas := newReplicaSlice(ds.gossip, desc)
				// Rearrange the replicas so that those replicas with long common
				// prefix of attributes end up first. If there's no prefix, this is a
				// no-op.
				order := ds.optimizeReplicaOrder(replicas)
				// If this request needs to go to a leader and we know who that is, move
				// it to the front.
				if !(proto.IsRead(args) && args.Header().ReadConsistency == proto.INCONSISTENT) &&
					leader.StoreID > 0 {
					if i := replicas.FindReplica(leader.StoreID); i >= 0 {
						replicas.MoveToFront(i)
						order = rpc.OrderStable
					}
				}
				return ds.sendRPC(desc.RaftID, replicas, order, args, curReply)
			}()
			// ......... (remaining code omitted) .........
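The part of this example that exercises FindReplica is the leader-first reordering of the replica slice. Below is a minimal, self-contained sketch of just that logic; the replica and replicaSlice types and the findReplica/moveToFront methods are simplified stand-ins for the real proto.Replica and ReplicaSlice, not the library's actual definitions.

package main

import "fmt"

// replica and replicaSlice are simplified stand-ins for proto.Replica and
// ReplicaSlice; only the reordering logic below mirrors the excerpt.
type replica struct{ StoreID int32 }

type replicaSlice []replica

// findReplica returns the index of the replica living on the given store,
// or -1 if no replica matches (the contract the excerpt relies on).
func (rs replicaSlice) findReplica(storeID int32) int {
	for i := range rs {
		if rs[i].StoreID == storeID {
			return i
		}
	}
	return -1
}

// moveToFront swaps the replica at index i with the first element, so the
// presumed leader is tried first by the RPC layer.
func (rs replicaSlice) moveToFront(i int) {
	rs[0], rs[i] = rs[i], rs[0]
}

func main() {
	replicas := replicaSlice{{StoreID: 1}, {StoreID: 2}, {StoreID: 3}}
	leaderStoreID := int32(3) // in the excerpt: ds.leaderCache.Lookup(...)
	if i := replicas.findReplica(leaderStoreID); i >= 0 {
		replicas.moveToFront(i)
	}
	fmt.Println(replicas) // [{3} {2} {1}]
}

Putting the presumed leader first matters because the RPC layer tries replicas in order; switching to rpc.OrderStable appears to pin that ordering so the leader hint isn't shuffled away, avoiding a NotLeaderError round trip.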
Example 2: sendAttempt
// sendAttempt is invoked by Send and handles retry logic and cache eviction
// for a call sent to a single range. It returns a retry status, which is Break
// on success and either Break, Continue or Reset depending on error condition.
// This method is expected to be invoked from within a backoff / retry loop to
// retry the send repeatedly (e.g. to continue processing after a critical node
// becomes available after downtime or the range descriptor is refreshed via
// lookup).
func (ds *DistSender) sendAttempt(desc *proto.RangeDescriptor, call proto.Call) (retry.Status, error) {
	leader := ds.leaderCache.Lookup(proto.RaftID(desc.RaftID))
	// Try to send the call.
	replicas := newReplicaSlice(ds.gossip, desc)
	// Rearrange the replicas so that those replicas with long common
	// prefix of attributes end up first. If there's no prefix, this is a
	// no-op.
	order := ds.optimizeReplicaOrder(replicas)
	args := call.Args
	reply := call.Reply
	// If this request needs to go to a leader and we know who that is, move
	// it to the front.
	if !(proto.IsRead(args) && args.Header().ReadConsistency == proto.INCONSISTENT) &&
		leader.StoreID > 0 {
		if i := replicas.FindReplica(leader.StoreID); i >= 0 {
			replicas.MoveToFront(i)
			order = rpc.OrderStable
		}
	}
	err := ds.sendRPC(desc.RaftID, replicas, order, args, reply)
	if err != nil {
		// For an RPC error to occur, we must've been unable to contact any
		// replicas. In this case, likely all nodes are down (or not getting back
		// to us within a reasonable amount of time).
		// We may simply not be trying to talk to the up-to-date replicas, so
		// clearing the descriptor here should be a good idea.
		// TODO(tschottdorf): If a replica group goes dead, this will cause clients
		// to put high read pressure on the first range, so there should be some
		// rate limiting here.
		ds.rangeCache.EvictCachedRangeDescriptor(args.Header().Key, desc)
	} else {
		err = reply.Header().GoError()
	}
	if err != nil {
		if log.V(1) {
			log.Warningf("failed to invoke %s: %s", call.Method(), err)
		}
		// If retryable, allow retry. For range not found or range
		// key mismatch errors, we don't backoff on the retry,
		// but reset the backoff loop so we can retry immediately.
		switch tErr := err.(type) {
		case *proto.RangeNotFoundError, *proto.RangeKeyMismatchError:
			// Range descriptor might be out of date - evict it.
			ds.rangeCache.EvictCachedRangeDescriptor(args.Header().Key, desc)
			// On addressing errors, don't backoff; retry immediately.
			return retry.Reset, err
		case *proto.NotLeaderError:
			newLeader := tErr.GetLeader()
			// Verify that leader is a known replica according to the
			// descriptor. If not, we've got a stale replica; evict cache.
			// Next, cache the new leader.
			if newLeader != nil {
				if i, _ := desc.FindReplica(newLeader.StoreID); i == -1 {
					if log.V(1) {
						log.Infof("error indicates unknown leader %s, expunging descriptor %s", newLeader, desc)
					}
					ds.rangeCache.EvictCachedRangeDescriptor(args.Header().Key, desc)
				}
			} else {
				newLeader = &proto.Replica{}
			}
			ds.updateLeaderCache(proto.RaftID(desc.RaftID), *newLeader)
			return retry.Reset, err
		case util.Retryable:
			if tErr.CanRetry() {
				return retry.Continue, err
			}
		}
		return retry.Break, err
	}
	return retry.Break, nil
}
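Note that none of these examples shows RangeDescriptor.FindReplica itself; they only call it and compare the first return value against -1. The sketch below reconstructs a plausible implementation from that usage alone; the type definitions are simplified stand-ins and the method body is an assumption, not the verbatim library code.

package main

import "fmt"

// Simplified stand-ins for proto.StoreID, proto.Replica and
// proto.RangeDescriptor; only the fields the excerpts touch are included.
type StoreID int32

type Replica struct{ StoreID StoreID }

type RangeDescriptor struct{ Replicas []Replica }

// FindReplica returns the index of (and a pointer to) the replica whose
// store ID matches, or (-1, nil) when the store holds no replica of the
// range. The body is reconstructed from the `i, _ := desc.FindReplica(...)`
// / `i == -1` usage above; the real method may differ in detail.
func (r *RangeDescriptor) FindReplica(storeID StoreID) (int, *Replica) {
	for i := range r.Replicas {
		if r.Replicas[i].StoreID == storeID {
			return i, &r.Replicas[i]
		}
	}
	return -1, nil
}

func main() {
	desc := RangeDescriptor{Replicas: []Replica{{1}, {5}, {9}}}
	if i, rep := desc.FindReplica(5); i != -1 {
		fmt.Printf("replica at index %d lives on store %d\n", i, rep.StoreID)
	}
}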
Example 3: sendChunk
// ......... (preceding code omitted) .........
			// any replicas. In this case, likely all nodes are down (or
			// not getting back to us within a reasonable amount of time).
			// We may simply not be trying to talk to the up-to-date
			// replicas, so clearing the descriptor here should be a good
			// idea.
			// TODO(tschottdorf): If a replica group goes dead, this
			// will cause clients to put high read pressure on the first
			// range, so there should be some rate limiting here.
			evictDesc()
			if tErr.CanRetry() {
				continue
			}
		case *proto.RangeNotFoundError, *proto.RangeKeyMismatchError:
			trace.Event(fmt.Sprintf("reply error: %T", err))
			// Range descriptor might be out of date - evict it.
			evictDesc()
			// On addressing errors, don't backoff; retry immediately.
			r.Reset()
			if log.V(1) {
				log.Warning(err)
			}
			// For the remainder of this call, we'll assume that intents
			// are fair game. This replaces more complex logic based on
			// the type of request.
			options.considerIntents = true
			continue
		case *proto.NotLeaderError:
			trace.Event(fmt.Sprintf("reply error: %T", err))
			newLeader := tErr.GetLeader()
			// Verify that leader is a known replica according to the
			// descriptor. If not, we've got a stale replica; evict cache.
			// Next, cache the new leader.
			if newLeader != nil {
				if i, _ := desc.FindReplica(newLeader.StoreID); i == -1 {
					if log.V(1) {
						log.Infof("error indicates unknown leader %s, expunging descriptor %s", newLeader, desc)
					}
					evictDesc()
				}
			} else {
				newLeader = &proto.Replica{}
			}
			ds.updateLeaderCache(proto.RangeID(desc.RangeID), *newLeader)
			if log.V(1) {
				log.Warning(err)
			}
			r.Reset()
			continue
		case retry.Retryable:
			if tErr.CanRetry() {
				if log.V(1) {
					log.Warning(err)
				}
				trace.Event(fmt.Sprintf("reply error: %T", err))
				continue
			}
		}
		break
	}

	// Immediately return if querying a range failed non-retryably.
	if err != nil {
		return nil, err
	}

	first := br == nil
	// ......... (remaining code omitted) .........
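All of these examples lean on the same backoff/retry contract: r.Next() gates each attempt, and r.Reset() restarts the backoff schedule so that addressing errors (RangeNotFoundError, RangeKeyMismatchError) and leadership changes are retried immediately rather than after a delay. The stand-in retrier below imitates that contract under stated assumptions; the real util/retry package also enforces attempt and duration limits, which are omitted here.

package main

import (
	"fmt"
	"time"
)

// retrier is a simplified stand-in for cockroach's util/retry loop driver:
// Next gates each attempt, sleeping with exponential backoff between
// attempts, and Reset restarts the schedule so the next attempt happens
// immediately.
type retrier struct {
	initial, max, cur time.Duration
	first             bool
}

func start(initial, max time.Duration) *retrier {
	return &retrier{initial: initial, max: max, first: true}
}

func (r *retrier) Next() bool {
	if r.first {
		r.first, r.cur = false, r.initial
		return true
	}
	time.Sleep(r.cur) // back off before the next attempt
	if r.cur *= 2; r.cur > r.max {
		r.cur = r.max
	}
	return true
}

// Reset restarts the backoff schedule, as done on addressing errors.
func (r *retrier) Reset() { r.first = true }

func main() {
	attempts := 0
	for r := start(time.Millisecond, 8*time.Millisecond); r.Next(); {
		attempts++
		if attempts == 2 {
			r.Reset() // e.g. after evicting a stale range descriptor
		}
		if attempts == 4 {
			fmt.Println("succeeded on attempt", attempts)
			break
		}
	}
}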
Example 4: Send
// ......... (preceding code omitted) .........
		} else {
			err = curReply.Header().GoError()
		}
		if err == nil {
			break
		}
		if log.V(1) {
			log.Warningf("failed to invoke %s: %s", call.Method(), err)
		}
		// If retryable, allow retry. For range not found or range
		// key mismatch errors, we don't backoff on the retry,
		// but reset the backoff loop so we can retry immediately.
		switch tErr := err.(type) {
		case *proto.RangeNotFoundError, *proto.RangeKeyMismatchError:
			trace.Event(fmt.Sprintf("reply error: %T", err))
			// Range descriptor might be out of date - evict it.
			ds.rangeCache.EvictCachedRangeDescriptor(descKey, desc, isReverseScan)
			// On addressing errors, don't backoff; retry immediately.
			r.Reset()
			if log.V(1) {
				log.Warning(err)
			}
			continue
		case *proto.NotLeaderError:
			trace.Event(fmt.Sprintf("reply error: %T", err))
			newLeader := tErr.GetLeader()
			// Verify that leader is a known replica according to the
			// descriptor. If not, we've got a stale replica; evict cache.
			// Next, cache the new leader.
			if newLeader != nil {
				if i, _ := desc.FindReplica(newLeader.StoreID); i == -1 {
					if log.V(1) {
						log.Infof("error indicates unknown leader %s, expunging descriptor %s", newLeader, desc)
					}
					ds.rangeCache.EvictCachedRangeDescriptor(descKey, desc, isReverseScan)
				}
			} else {
				newLeader = &proto.Replica{}
			}
			ds.updateLeaderCache(proto.RangeID(desc.RangeID), *newLeader)
			if log.V(1) {
				log.Warning(err)
			}
			r.Reset()
			continue
		case retry.Retryable:
			if tErr.CanRetry() {
				if log.V(1) {
					log.Warning(err)
				}
				trace.Event(fmt.Sprintf("reply error: %T", err))
				continue
			}
		}
		break
	}

	// Immediately return if querying a range failed non-retryably.
	// For multi-range requests, we return the failing range's reply.
	if err != nil {
		call.Reply.Header().SetGoError(err)
		return
	}
	// ......... (remaining code omitted) .........
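The NotLeaderError branch is nearly identical across all four examples and reads as one pattern: trust the error's leader hint only if desc.FindReplica confirms the descriptor lists that store; otherwise treat the descriptor as stale and evict it; in either case refresh the leader cache before retrying without backoff. Here is that pattern factored into a hypothetical helper, reusing the stand-in RangeDescriptor and Replica types from the FindReplica sketch above; evict and updateLeader are placeholder callbacks, not real library functions.

// handleNotLeader sketches the NotLeaderError branch shared by all four
// excerpts. evict stands in for range-descriptor cache eviction and
// updateLeader for ds.updateLeaderCache; both are assumptions made for
// illustration.
func handleNotLeader(desc *RangeDescriptor, newLeader *Replica,
	evict func(), updateLeader func(Replica)) {
	if newLeader != nil {
		// A leader the descriptor does not list means the descriptor is
		// stale: evict it so the next attempt re-resolves the range.
		if i, _ := desc.FindReplica(newLeader.StoreID); i == -1 {
			evict()
		}
	} else {
		newLeader = &Replica{}
	}
	// Cache the (possibly empty) new leader; callers then reset the
	// backoff loop and retry immediately.
	updateLeader(*newLeader)
}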