This article collects typical usage examples of the Golang Call.Reply method from github.com/cockroachdb/cockroach/proto. If you have been wondering what exactly Call.Reply does, or how to use it, the curated method examples here may help. You can also explore further usage examples of its containing type, github.com/cockroachdb/cockroach/proto.Call.
The following presents 8 code examples of the Call.Reply method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
Example 1: sendBatch
// sendBatch unrolls a batched command and sends each constituent
// command in parallel.
// TODO(tschottdorf): modify sendBatch so that it sends truly parallel requests
// when outside of a Transaction. This can then be used to address the TODO in
// (*TxnCoordSender).resolve().
func (tc *TxnCoordSender) sendBatch(ctx context.Context, batchArgs *proto.BatchRequest, batchReply *proto.BatchResponse) {
	// Prepare the calls by unrolling the batch. If the batchReply is
	// pre-initialized with replies, use those; otherwise create replies
	// as needed.
	// TODO(spencer): send calls in parallel.
	batchReply.Txn = batchArgs.Txn
	for i := range batchArgs.Requests {
		args := batchArgs.Requests[i].GetValue().(proto.Request)
		if err := updateForBatch(args, batchArgs.RequestHeader); err != nil {
			batchReply.Header().SetGoError(err)
			return
		}
		call := proto.Call{Args: args}
		// Create a reply from the method type and add to batch response.
		if i >= len(batchReply.Responses) {
			call.Reply = args.CreateReply()
			batchReply.Add(call.Reply)
		} else {
			call.Reply = batchReply.Responses[i].GetValue().(proto.Response)
		}
		tc.sendOne(ctx, call)
		// Amalgamate transaction updates and propagate first error, if applicable.
		if batchReply.Txn != nil {
			batchReply.Txn.Update(call.Reply.Header().Txn)
		}
		if call.Reply.Header().Error != nil {
			batchReply.Error = call.Reply.Header().Error
			return
		}
	}
}
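The core pattern here is worth seeing in isolation: a proto.Call pairs a request with a reply slot, and when the batch response has no pre-initialized slot, the reply is manufactured from the request's own type via CreateReply. Below is a minimal, self-contained sketch of that shape; the Request, Response, Call, getRequest, and getResponse types are simplified stand-ins invented for illustration, not the real CockroachDB definitions.

package main

import "fmt"

// Request and Response are simplified stand-ins for proto.Request and
// proto.Response; the real types carry full RPC headers.
type Request interface {
	Method() string
	CreateReply() Response
}

type Response interface {
	Error() error
}

// Call mirrors the shape of proto.Call: a request paired with the slot
// that will hold its reply.
type Call struct {
	Args  Request
	Reply Response
}

type getRequest struct{ key string }

func (g *getRequest) Method() string        { return "Get" }
func (g *getRequest) CreateReply() Response { return &getResponse{} }

type getResponse struct{ err error }

func (r *getResponse) Error() error { return r.err }

func main() {
	// As in sendBatch: when the batch response has no pre-initialized
	// slot, manufacture the reply from the request's own type.
	call := Call{Args: &getRequest{key: "a"}}
	call.Reply = call.Args.CreateReply()
	fmt.Printf("%s -> %T\n", call.Args.Method(), call.Reply)
}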
Example 2: sendBatch
// sendBatch unrolls a batched command and sends each constituent
// command in parallel.
func (tc *TxnCoordSender) sendBatch(batchArgs *proto.InternalBatchRequest, batchReply *proto.InternalBatchResponse) {
	// Prepare the calls by unrolling the batch. If the batchReply is
	// pre-initialized with replies, use those; otherwise create replies
	// as needed.
	// TODO(spencer): send calls in parallel.
	batchReply.Txn = batchArgs.Txn
	for i := range batchArgs.Requests {
		args := batchArgs.Requests[i].GetValue().(proto.Request)
		call := proto.Call{Args: args}
		// Disallow transaction, user and priority on individual calls, unless
		// equal.
		if args.Header().User != "" && args.Header().User != batchArgs.User {
			batchReply.Header().SetGoError(util.Error("cannot have individual user on call in batch"))
			return
		}
		args.Header().User = batchArgs.User
		if args.Header().UserPriority != nil && args.Header().GetUserPriority() != batchArgs.GetUserPriority() {
			batchReply.Header().SetGoError(util.Error("cannot have individual user priority on call in batch"))
			return
		}
		args.Header().UserPriority = batchArgs.UserPriority
		if txn := args.Header().Txn; txn != nil && !txn.Equal(batchArgs.Txn) {
			batchReply.Header().SetGoError(util.Error("cannot have individual transactional call in batch"))
			return
		}
		// Propagate batch Txn to each call.
		args.Header().Txn = batchArgs.Txn
		// Create a reply from the method type and add to batch response.
		if i >= len(batchReply.Responses) {
			call.Reply = args.CreateReply()
			batchReply.Add(call.Reply)
		} else {
			call.Reply = batchReply.Responses[i].GetValue().(proto.Response)
		}
		tc.sendOne(call)
		// Amalgamate transaction updates and propagate first error, if applicable.
		if batchReply.Txn != nil {
			batchReply.Txn.Update(call.Reply.Header().Txn)
		}
		if call.Reply.Header().Error != nil {
			batchReply.Error = call.Reply.Header().Error
			return
		}
	}
}
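The guard clauses in this variant all follow one rule: a field set on an individual call must be absent or equal to the batch-level value, after which the batch value is stamped onto the call. The sketch below isolates that rule with a pared-down header struct; the type, the getPriority default, and checkBatchHeader are assumptions made for illustration, not the real proto.RequestHeader logic.

package main

import (
	"errors"
	"fmt"
)

// header is a pared-down stand-in for proto.RequestHeader.
type header struct {
	User         string
	UserPriority *int32
}

// getPriority mimics a protobuf getter: nil means default priority.
// The default value 1 is an assumption made for this sketch.
func getPriority(h *header) int32 {
	if h.UserPriority != nil {
		return *h.UserPriority
	}
	return 1
}

// checkBatchHeader applies the rule from sendBatch: an individual call
// may only carry User/UserPriority values equal to the batch-level ones,
// and afterwards the batch values are stamped onto the call.
func checkBatchHeader(call, batch *header) error {
	if call.User != "" && call.User != batch.User {
		return errors.New("cannot have individual user on call in batch")
	}
	call.User = batch.User
	if call.UserPriority != nil && getPriority(call) != getPriority(batch) {
		return errors.New("cannot have individual user priority on call in batch")
	}
	call.UserPriority = batch.UserPriority
	return nil
}

func main() {
	batch := &header{User: "root"}
	ok := &header{}               // inherits batch values
	bad := &header{User: "alice"} // conflicts with the batch user
	fmt.Println(checkBatchHeader(ok, batch))  // <nil>
	fmt.Println(checkBatchHeader(bad, batch)) // cannot have individual user ...
}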
Example 3: MaybeWrapCall
// MaybeWrapCall returns a new call which wraps the original Args and Reply
// in a batch, if necessary.
// TODO(tschottdorf): will go when proto.Call does.
func MaybeWrapCall(call proto.Call) (proto.Call, func(proto.Call) proto.Call) {
	var unwrap func(proto.Response) proto.Response
	call.Args, unwrap = MaybeWrap(call.Args)
	newUnwrap := func(origReply proto.Response) func(proto.Call) proto.Call {
		return func(newCall proto.Call) proto.Call {
			origReply.Reset()
			gogoproto.Merge(origReply, unwrap(newCall.Reply))
			*origReply.Header() = *newCall.Reply.Header()
			newCall.Reply = origReply
			return newCall
		}
	}(call.Reply)
	call.Reply = call.Args.CreateReply()
	return call, newUnwrap
}
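The interesting move in MaybeWrapCall is the closure: it captures the caller's original reply object so that, once the wrapped call returns, the batch's inner reply can be merged back into the object the caller already holds. A self-contained sketch of that capture-and-restore shape follows; the reply struct is invented, and a plain struct assignment stands in for Reset plus gogoproto.Merge.

package main

import "fmt"

// reply is an invented placeholder for a proto.Response implementation.
type reply struct {
	rows int
}

// newUnwrap captures the caller's original reply, as MaybeWrapCall does:
// after the wrapped call completes, the inner reply's contents are copied
// back into origReply, and origReply is what the caller sees.
func newUnwrap(origReply *reply) func(inner *reply) *reply {
	return func(inner *reply) *reply {
		*origReply = *inner // stands in for Reset + gogoproto.Merge
		return origReply
	}
}

func main() {
	orig := &reply{}
	unwrap := newUnwrap(orig)

	// Pretend the batched RPC produced a fresh inner reply.
	got := unwrap(&reply{rows: 3})

	fmt.Println(got == orig, orig.rows) // true 3: caller's object updated
}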
Example 4: Send
// Send implements the client.Sender interface. It verifies
// permissions and looks up the appropriate range based on the
// supplied key and sends the RPC according to the specified options.
//
// If the request spans multiple ranges (which is possible for Scan or
// DeleteRange requests), Send sends requests to the individual ranges
// sequentially and combines the results transparently.
//
// This may temporarily adjust the request headers, so the proto.Call
// must not be used concurrently until Send has returned.
func (ds *DistSender) Send(_ context.Context, call proto.Call) {
	args := call.Args
	finalReply := call.Reply
	// Verify permissions.
	if err := ds.verifyPermissions(call.Args); err != nil {
		call.Reply.Header().SetGoError(err)
		return
	}
	// In the event that timestamp isn't set and read consistency isn't
	// required, set the timestamp using the local clock.
	if args.Header().ReadConsistency == proto.INCONSISTENT && args.Header().Timestamp.Equal(proto.ZeroTimestamp) {
		// Make sure that after the call, args hasn't changed.
		defer func(timestamp proto.Timestamp) {
			args.Header().Timestamp = timestamp
		}(args.Header().Timestamp)
		args.Header().Timestamp = ds.clock.Now()
	}
	// If this is a bounded request, we will change its bound as we receive
	// replies. This undoes that when we return.
	boundedArgs, argsBounded := args.(proto.Bounded)
	if argsBounded {
		defer func(bound int64) {
			boundedArgs.SetBound(bound)
		}(boundedArgs.GetBound())
	}
	defer func(key proto.Key) {
		args.Header().Key = key
	}(args.Header().Key)
	// Retry logic for lookup of range by key and RPCs to range replicas.
	curReply := finalReply
	for {
		call.Reply = curReply
		curReply.Header().Reset()
		var desc, descNext *proto.RangeDescriptor
		var err error
		for r := retry.Start(ds.rpcRetryOptions); r.Next(); {
			// Get range descriptor (or, when spanning range, descriptors).
			// sendAttempt below may clear them on certain errors, so we
			// refresh (likely from the cache) on every retry.
			desc, descNext, err = ds.getDescriptors(call)
			// getDescriptors may fail retryably if the first range isn't
			// available via Gossip.
			if err != nil {
				if rErr, ok := err.(util.Retryable); ok && rErr.CanRetry() {
					if log.V(1) {
						log.Warning(err)
					}
					continue
				}
				break
			}
			err = func() error {
				// Truncate the request to our current range, making sure not to
				// touch it unless we have to (it is illegal to send EndKey on
				// commands which do not operate on ranges).
				if descNext != nil {
					defer func(endKey proto.Key) {
						args.Header().EndKey = endKey
					}(args.Header().EndKey)
					args.Header().EndKey = desc.EndKey
				}
				leader := ds.leaderCache.Lookup(proto.RaftID(desc.RaftID))
				// Try to send the call.
				replicas := newReplicaSlice(ds.gossip, desc)
				// Rearrange the replicas so that those replicas with long common
				// prefix of attributes end up first. If there's no prefix, this is a
				// no-op.
				order := ds.optimizeReplicaOrder(replicas)
				// If this request needs to go to a leader and we know who that is, move
				// it to the front.
				if !(proto.IsRead(args) && args.Header().ReadConsistency == proto.INCONSISTENT) &&
					leader.StoreID > 0 {
					if i := replicas.FindReplica(leader.StoreID); i >= 0 {
						replicas.MoveToFront(i)
						order = rpc.OrderStable
					}
				}
				return ds.sendRPC(desc.RaftID, replicas, order, args, curReply)
			}()
	//......... (remainder of the code omitted here) .........
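The retry loop above (truncated by the source page) hinges on classifying errors: anything implementing util.Retryable with CanRetry() == true keeps the r.Next() loop going, while other errors break out. The following self-contained sketch reproduces that classification; the retryable interface, tempErr type, and withRetry helper are assumptions for illustration, not the real retry package.

package main

import (
	"fmt"
	"time"
)

// retryable mirrors util.Retryable from the example: errors report
// whether the failed operation may be attempted again.
type retryable interface {
	error
	CanRetry() bool
}

// tempErr is an invented transient error for this sketch.
type tempErr struct{ msg string }

func (e tempErr) Error() string  { return e.msg }
func (e tempErr) CanRetry() bool { return true }

// withRetry plays the role of the `for r := retry.Start(...); r.Next();`
// loop in Send: retry on errors flagged retryable, stop on anything else.
func withRetry(attempts int, backoff time.Duration, op func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = op(); err == nil {
			return nil
		}
		if rErr, ok := err.(retryable); !ok || !rErr.CanRetry() {
			return err
		}
		time.Sleep(backoff)
	}
	return err
}

func main() {
	n := 0
	err := withRetry(5, time.Millisecond, func() error {
		n++
		if n < 3 {
			return tempErr{"first range not yet available via gossip"}
		}
		return nil
	})
	fmt.Println(n, err) // 3 <nil>
}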
Example 5: Send
// Send implements the client.Sender interface. It verifies
// permissions and looks up the appropriate range based on the
// supplied key and sends the RPC according to the specified options.
//
// If the request spans multiple ranges (which is possible for Scan or
// DeleteRange requests), Send sends requests to the individual ranges
// sequentially and combines the results transparently.
//
// This may temporarily adjust the request headers, so the proto.Call
// must not be used concurrently until Send has returned.
func (ds *DistSender) Send(_ context.Context, call proto.Call) {
	args := call.Args
	finalReply := call.Reply
	endKey := args.Header().EndKey
	// Verify permissions.
	if err := ds.verifyPermissions(call.Args); err != nil {
		call.Reply.Header().SetGoError(err)
		return
	}
	// In the event that timestamp isn't set and read consistency isn't
	// required, set the timestamp using the local clock.
	if args.Header().ReadConsistency == proto.INCONSISTENT && args.Header().Timestamp.Equal(proto.ZeroTimestamp) {
		// Make sure that after the call, args hasn't changed.
		defer func(timestamp proto.Timestamp) {
			args.Header().Timestamp = timestamp
		}(args.Header().Timestamp)
		args.Header().Timestamp = ds.clock.Now()
	}
	// If this is a bounded request, we will change its bound as we receive
	// replies. This undoes that when we return.
	boundedArgs, _ := args.(proto.Bounded)
	if boundedArgs != nil {
		defer func(n int64) {
			boundedArgs.SetBound(n)
		}(boundedArgs.GetBound())
	}
	// Retry logic for lookup of range by key and RPCs to range replicas.
	retryOpts := ds.rpcRetryOptions
	retryOpts.Tag = "routing " + call.Method().String() + " rpc"
	curReply := finalReply
	for {
		call.Reply = curReply
		curReply.Header().Reset()
		var desc, descNext *proto.RangeDescriptor
		err := retry.WithBackoff(retryOpts, func() (retry.Status, error) {
			var err error
			// Get range descriptor (or, when spanning range, descriptors).
			// sendAttempt below may clear them on certain errors, so we
			// refresh (likely from the cache) on every retry.
			desc, descNext, err = ds.getDescriptors(call)
			// getDescriptors may fail retryably if the first range isn't
			// available via Gossip.
			if err != nil {
				if rErr, ok := err.(util.Retryable); ok && rErr.CanRetry() {
					return retry.Continue, err
				}
				return retry.Break, err
			}
			// Truncate the request to our current range, making sure not to
			// touch it unless we have to (it is illegal to send EndKey on
			// commands which do not operate on ranges).
			if descNext != nil {
				args.Header().EndKey = desc.EndKey
				defer func() {
					// "Untruncate" EndKey to original.
					args.Header().EndKey = endKey
				}()
			}
			return ds.sendAttempt(desc, call)
		})
		// Immediately return if querying a range failed non-retryably.
		// For multi-range requests, we return the failing range's reply.
		if err != nil {
			call.Reply.Header().SetGoError(err)
			return
		}
		if finalReply != curReply {
			// This was the second or later call in a multi-range request.
			// Combine the new response with the existing one.
			if cFinalReply, ok := finalReply.(proto.Combinable); ok {
				cFinalReply.Combine(curReply)
			} else {
				// This should never apply in practice, as we'll only end up here
				// for range-spanning requests.
				call.Reply.Header().SetGoError(util.Errorf("multi-range request with non-combinable response type"))
				return
			}
		}
		// If this request has a bound, such as MaxResults in
		// ScanRequest, check whether enough rows have been retrieved.
	//......... (remainder of the code omitted here) .........
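The multi-range branch depends on proto.Combinable: the reply from each subsequent range is folded into the first range's reply, so the caller sees a single merged result. Here is a self-contained sketch of that merging; combinable and scanResponse are invented stand-ins for the real interfaces.

package main

import "fmt"

// combinable mirrors proto.Combinable: a response that can absorb the
// response from the next range of a multi-range request.
type combinable interface {
	Combine(other interface{})
}

// scanResponse is an invented stand-in for a Scan reply.
type scanResponse struct {
	rows []string
}

func (s *scanResponse) Combine(other interface{}) {
	if o, ok := other.(*scanResponse); ok {
		s.rows = append(s.rows, o.rows...)
	}
}

func main() {
	// As in Send: finalReply is the first range's reply; replies from
	// later ranges are combined into it rather than returned separately.
	var finalReply combinable = &scanResponse{rows: []string{"a", "b"}}
	finalReply.Combine(&scanResponse{rows: []string{"c"}})
	fmt.Println(finalReply.(*scanResponse).rows) // [a b c]
}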
Example 6: close
// close sends resolve intent commands for all key ranges this
// transaction has covered, clears the keys cache and closes the
// metadata heartbeat. Any keys listed in the resolved slice have
// already been resolved and do not receive resolve intent commands.
func (tm *txnMetadata) close(txn *proto.Transaction, resolved []proto.Key, sender client.Sender, stopper *util.Stopper) {
	close(tm.txnEnd) // stop heartbeat
	if tm.keys.Len() > 0 {
		if log.V(2) {
			log.Infof("cleaning up %d intent(s) for transaction %s", tm.keys.Len(), txn)
		}
	}
	for _, o := range tm.keys.GetOverlaps(proto.KeyMin, proto.KeyMax) {
		// If the op was range based, end key != start key: resolve a range.
		var call proto.Call
		key := o.Key.Start().(proto.Key)
		endKey := o.Key.End().(proto.Key)
		if !key.Next().Equal(endKey) {
			call.Args = &proto.InternalResolveIntentRangeRequest{
				RequestHeader: proto.RequestHeader{
					Timestamp: txn.Timestamp,
					Key:       key,
					EndKey:    endKey,
					User:      storage.UserRoot,
					Txn:       txn,
				},
			}
			call.Reply = &proto.InternalResolveIntentRangeResponse{}
		} else {
			// Check if the key has already been resolved; skip if yes.
			found := false
			for _, k := range resolved {
				if key.Equal(k) {
					found = true
				}
			}
			if found {
				continue
			}
			call.Args = &proto.InternalResolveIntentRequest{
				RequestHeader: proto.RequestHeader{
					Timestamp: txn.Timestamp,
					Key:       key,
					User:      storage.UserRoot,
					Txn:       txn,
				},
			}
			call.Reply = &proto.InternalResolveIntentResponse{}
		}
		// We don't care about the reply channel; these are best
		// effort. We simply fire and forget, each in its own goroutine.
		if stopper.StartTask() {
			go func() {
				if log.V(2) {
					log.Infof("cleaning up intent %q for txn %s", call.Args.Header().Key, txn)
				}
				sender.Send(context.TODO(), call)
				if call.Reply.Header().Error != nil {
					log.Warningf("failed to cleanup %q intent: %s", call.Args.Header().Key, call.Reply.Header().GoError())
				}
				stopper.FinishTask()
			}()
		}
	}
	tm.keys.Clear()
}
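The StartTask/FinishTask pairing is what keeps these fire-and-forget goroutines from outliving the server: a task may only start while the stopper is open, and shutdown waits for the tasks in flight. The sketch below is a deliberately minimal stopper written for illustration, not the real util.Stopper:

package main

import (
	"fmt"
	"sync"
)

// stopper is a minimal illustration of the util.Stopper contract: tasks
// may only start while the stopper is open, and Stop waits for every
// in-flight task before returning.
type stopper struct {
	mu      sync.Mutex
	stopped bool
	wg      sync.WaitGroup
}

func (s *stopper) StartTask() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.stopped {
		return false
	}
	s.wg.Add(1)
	return true
}

func (s *stopper) FinishTask() { s.wg.Done() }

func (s *stopper) Stop() {
	s.mu.Lock()
	s.stopped = true
	s.mu.Unlock()
	s.wg.Wait()
}

func main() {
	s := &stopper{}
	for _, key := range []string{"a", "b", "c"} {
		key := key         // capture the per-iteration value
		if s.StartTask() { // fire and forget, as in close()
			go func() {
				defer s.FinishTask()
				fmt.Println("resolving intent at", key)
			}()
		}
	}
	s.Stop() // drain in-flight tasks before shutdown
}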
Example 7: close
// close sends resolve intent commands for all key ranges this
// transaction has covered, clears the keys cache and closes the
// metadata heartbeat. Any keys listed in the resolved slice have
// already been resolved and do not receive resolve intent commands.
func (tm *txnMetadata) close(trace *tracer.Trace, txn *proto.Transaction, resolved []proto.Key, sender client.Sender, stopper *stop.Stopper) {
	close(tm.txnEnd) // stop heartbeat
	trace.Event("coordinator stops")
	if tm.keys.Len() > 0 {
		if log.V(2) {
			log.Infof("cleaning up %d intent(s) for transaction %s", tm.keys.Len(), txn)
		}
	}
	// TODO(tschottdorf): Should create a Batch here.
	for _, o := range tm.keys.GetOverlaps(proto.KeyMin, proto.KeyMax) {
		// If the op was range based, end key != start key: resolve a range.
		var call proto.Call
		key := o.Key.Start().(proto.Key)
		endKey := o.Key.End().(proto.Key)
		if !key.Next().Equal(endKey) {
			call.Args = &proto.InternalResolveIntentRangeRequest{
				RequestHeader: proto.RequestHeader{
					Timestamp: txn.Timestamp,
					Key:       key,
					EndKey:    endKey,
					User:      security.RootUser,
					Txn:       txn,
				},
			}
			call.Reply = &proto.InternalResolveIntentRangeResponse{}
		} else {
			// Check if the key has already been resolved; skip if yes.
			found := false
			for _, k := range resolved {
				if key.Equal(k) {
					found = true
				}
			}
			if found {
				continue
			}
			call.Args = &proto.InternalResolveIntentRequest{
				RequestHeader: proto.RequestHeader{
					Timestamp: txn.Timestamp,
					Key:       key,
					User:      security.RootUser,
					Txn:       txn,
				},
			}
			call.Reply = &proto.InternalResolveIntentResponse{}
		}
		// We don't care about the reply channel; these are best
		// effort. We simply fire and forget, each in its own goroutine.
		ctx := tracer.ToCtx(context.Background(), trace.Fork())
		stopper.RunAsyncTask(func() {
			if log.V(2) {
				log.Infof("cleaning up intent %q for txn %s", call.Args.Header().Key, txn)
			}
			sender.Send(ctx, call)
			if call.Reply.Header().Error != nil {
				log.Warningf("failed to cleanup %q intent: %s", call.Args.Header().Key, call.Reply.Header().GoError())
			}
		})
	}
	tm.keys.Clear()
}
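Structurally this differs from Example 6 mainly in stop.Stopper.RunAsyncTask, which bundles the StartTask/FinishTask bookkeeping around the goroutine. Against the minimal stopper sketched after Example 6, such a wrapper might look as follows; this is an assumption about the shape of the API, not the real stop.Stopper method.

// RunAsyncTask, sketched against the minimal stopper from the previous
// example (an assumption, not the real stop.Stopper): it packages the
// StartTask/FinishTask pairing and the `go` statement into one call,
// returning false once the stopper has closed.
func (s *stopper) RunAsyncTask(f func()) bool {
	if !s.StartTask() {
		return false
	}
	go func() {
		defer s.FinishTask()
		f()
	}()
	return true
}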
Example 8: resolve
// resolve sends resolve intent commands for all key ranges this transaction
// has covered. Any keys listed in the resolved slice have already been
// resolved and are skipped.
func (tm *txnMetadata) resolve(trace *tracer.Trace, resolved []proto.Key, sender client.Sender) {
	txn := &tm.txn
	if tm.keys.Len() > 0 {
		if log.V(2) {
			log.Infof("cleaning up %d intent(s) for transaction %s", tm.keys.Len(), txn)
		}
	}
	// TODO(tschottdorf): Should create a Batch here. However, we're resolving
	// intents and if those are on meta records, there may be a certain order
	// in which they need to be resolved so that they can get routed to the
	// correct range. Since a batch runs its commands one by one and we don't
	// know the correct order, we prefer to fire them off in parallel.
	var wg sync.WaitGroup
	for _, o := range tm.keys.GetOverlaps(proto.KeyMin, proto.KeyMax) {
		// If the op was range based, end key != start key: resolve a range.
		var call proto.Call
		key := o.Key.Start().(proto.Key)
		endKey := o.Key.End().(proto.Key)
		if !key.Next().Equal(endKey) {
			call.Args = &proto.InternalResolveIntentRangeRequest{
				RequestHeader: proto.RequestHeader{
					Timestamp: txn.Timestamp,
					Key:       key,
					EndKey:    endKey,
					User:      security.RootUser,
					Txn:       txn,
				},
			}
			call.Reply = &proto.InternalResolveIntentRangeResponse{}
		} else {
			// Check if the key has already been resolved; skip if yes.
			found := false
			for _, k := range resolved {
				if key.Equal(k) {
					if log.V(2) {
						log.Warningf("skipping previously resolved intent at %q", k)
					}
					found = true
				}
			}
			if found {
				continue
			}
			call.Args = &proto.InternalResolveIntentRequest{
				RequestHeader: proto.RequestHeader{
					Timestamp: txn.Timestamp,
					Key:       key,
					User:      security.RootUser,
					Txn:       txn,
				},
			}
			call.Reply = &proto.InternalResolveIntentResponse{}
		}
		ctx := tracer.ToCtx(context.Background(), trace.Fork())
		if log.V(2) {
			log.Infof("cleaning up intent %q for txn %s", call.Args.Header().Key, txn)
		}
		// Each operation gets its own goroutine. We only want to return to
		// the caller after the operations have finished.
		wg.Add(1)
		go func() {
			sender.Send(ctx, call)
			wg.Done()
			if call.Reply.Header().Error != nil {
				log.Warningf("failed to cleanup %q intent: %s", call.Args.Header().Key, call.Reply.Header().GoError())
			}
		}()
	}
	defer trace.Epoch("waiting for intent resolution")()
	wg.Wait()
	tm.keys.Clear()
}
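Unlike close() in Examples 6 and 7, resolve() must not return before its cleanup goroutines finish, hence the sync.WaitGroup. A self-contained sketch of that fan-out-and-wait shape (the keys slice and the loop body are invented placeholders):

package main

import (
	"fmt"
	"sync"
)

func main() {
	keys := []string{"a", "b", "c"} // invented placeholder intents
	var wg sync.WaitGroup
	// One goroutine per intent, as in resolve(); the caller blocks until
	// every goroutine reports back instead of firing and forgetting.
	for _, key := range keys {
		key := key // capture the per-iteration value
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("resolved intent at", key)
		}()
	}
	wg.Wait()
	fmt.Println("all intents resolved")
}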