This page collects typical usage examples of the Request type from the Golang package github.com/cockroachdb/cockroach/proto. If you are unsure what proto.Request is for or how to use it, the hand-picked examples below may help.
The following 15 code examples of the Request type are shown, ordered by popularity by default.
Example 1: ReadOnlyCmd
// ReadOnlyCmd updates the read timestamp cache and waits for any
// overlapping writes currently processing through Raft ahead of us to
// clear via the read queue.
func (r *Range) ReadOnlyCmd(method string, args proto.Request, reply proto.Response) error {
	header := args.Header()
	r.Lock()
	r.tsCache.Add(header.Key, header.EndKey, header.Timestamp)
	var wg sync.WaitGroup
	r.readQ.AddRead(header.Key, header.EndKey, &wg)
	r.Unlock()
	wg.Wait()
	// It's possible that arbitrary delays (e.g. major GC, VM
	// de-prioritization, etc.) could cause the execution of this read
	// command to occur AFTER the range replica has lost leadership.
	//
	// There is a chance that we waited on writes, and although they
	// were committed to the log, they weren't successfully applied to
	// this replica's state machine. We re-verify leadership before
	// reading to make sure that all pending writes are persisted.
	//
	// There are some elaborate cases where we might have lost
	// leadership and then regained it during the delay, but this is ok
	// because any writes during that period necessarily had higher
	// timestamps. This is because the read-timestamp-cache prevents it
	// for the active leader and leadership changes force the
	// read-timestamp-cache to reset its high water mark.
	if !r.IsLeader() {
		// TODO(spencer): when we happen to know the leader, fill it in here via replica.
		return &proto.NotLeaderError{}
	}
	return r.executeCmd(method, args, reply)
}
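The interesting part of this example is that the read blocks on a sync.WaitGroup until the overlapping in-flight writes have drained. The following standalone sketch shows only that blocking behaviour with an invented writeQueue type; it simplifies the real read queue (which has the reader supply the WaitGroup and has each overlapping write decrement it), so treat it as an illustration of the idea rather than the actual data structure.

package main

import (
	"fmt"
	"sync"
	"time"
)

// writeQueue is a toy stand-in for the read queue in Example 1: writes
// register themselves, and a read waits until every write that was
// in flight at the time of the read has finished.
type writeQueue struct {
	mu      sync.Mutex
	pending []*sync.WaitGroup
}

// addWrite registers an in-flight write and returns a func to mark it done.
func (q *writeQueue) addWrite() func() {
	q.mu.Lock()
	defer q.mu.Unlock()
	wg := &sync.WaitGroup{}
	wg.Add(1)
	q.pending = append(q.pending, wg)
	return wg.Done
}

// waitForWrites blocks until all currently pending writes complete.
func (q *writeQueue) waitForWrites() {
	q.mu.Lock()
	pending := append([]*sync.WaitGroup(nil), q.pending...)
	q.mu.Unlock()
	for _, wg := range pending {
		wg.Wait()
	}
}

func main() {
	q := &writeQueue{}
	done := q.addWrite()
	go func() {
		time.Sleep(10 * time.Millisecond) // simulate a write going through Raft
		done()
	}()
	q.waitForWrites() // the "read" blocks here, as in ReadOnlyCmd
	fmt.Println("read may now proceed")
}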
Example 2: ExecuteCmd
// ExecuteCmd synchronously runs Store.ExecuteCmd. The store is looked
// up from the store map if specified by header.Replica; otherwise,
// the command is being executed locally, and the replica is
// determined via lookup of header.Key in the ranges slice.
func (kv *LocalKV) ExecuteCmd(method string, args proto.Request, replyChan interface{}) {
	// If the replica isn't specified in the header, look it up.
	var err error
	var store *storage.Store
	// If we aren't given a Replica, we have to bend over backwards a
	// little here. We need to find the Store, but all we have is the
	// Key. So find its Range locally, and pull out its Replica which we
	// use to find the Store. This lets us use the same codepath below
	// (store.ExecuteCmd) for both locally and remotely originated
	// commands.
	header := args.Header()
	if header.Replica.NodeID == 0 {
		if repl := kv.lookupReplica(header.Key); repl != nil {
			header.Replica = *repl
		} else {
			err = util.Errorf("unable to lookup range replica for key %q", string(header.Key))
		}
	}
	if err == nil {
		store, err = kv.GetStore(&header.Replica)
	}
	reply := reflect.New(reflect.TypeOf(replyChan).Elem().Elem()).Interface().(proto.Response)
	if err != nil {
		reply.Header().SetGoError(err)
	} else {
		store.ExecuteCmd(method, args, reply)
	}
	reflect.ValueOf(replyChan).Send(reflect.ValueOf(reply))
}
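The reflection trick above (allocating a reply value of the channel's element type and sending it back on an interface{}-typed channel) can be hard to parse at first glance. Here is a minimal, self-contained sketch of the same mechanic with a made-up PongResponse type; none of these names come from the CockroachDB code.

package main

import (
	"fmt"
	"reflect"
)

// PongResponse stands in for a concrete proto.Response type.
type PongResponse struct {
	Msg string
}

// dispatch allocates a new value of the channel's element type via
// reflection and sends it on replyChan, mirroring the pattern used in
// LocalKV.ExecuteCmd above. replyChan must be a channel of pointers,
// e.g. chan *PongResponse.
func dispatch(replyChan interface{}) {
	// TypeOf(replyChan) is "chan *PongResponse"; the first .Elem()
	// yields "*PongResponse", the second yields "PongResponse", which
	// reflect.New turns back into a *PongResponse.
	reply := reflect.New(reflect.TypeOf(replyChan).Elem().Elem()).Interface().(*PongResponse)
	reply.Msg = "pong"
	reflect.ValueOf(replyChan).Send(reflect.ValueOf(reply))
}

func main() {
	ch := make(chan *PongResponse, 1)
	dispatch(ch)
	fmt.Println((<-ch).Msg) // prints "pong"
}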
Example 3: ReadWriteCmd
// ReadWriteCmd first consults the response cache to determine whether
// this command has already been sent to the range. If a response is
// found, it's returned immediately and not submitted to raft. Next,
// the timestamp cache is checked to determine if any newer accesses to
// this command's affected keys have been made. If so, this command's
// timestamp is moved forward. Finally the keys affected by this
// command are added as pending writes to the read queue and the
// command is submitted to Raft. Upon completion, the write is removed
// from the read queue and the reply is added to the response cache.
func (r *Range) ReadWriteCmd(method string, args proto.Request, reply proto.Response) error {
	// Check the response cache in case this is a replay. This call
	// may block if the same command is already underway.
	header := args.Header()
	if ok, err := r.respCache.GetResponse(header.CmdID, reply); ok || err != nil {
		if ok { // this is a replay! extract error for return
			return reply.Header().GoError()
		}
		// In this case there was an error reading from the response
		// cache. Instead of failing the request just because we can't
		// decode the reply in the response cache, we proceed as though
		// idempotence has expired.
		log.Errorf("unable to read result for %+v from the response cache: %v", args, err)
	}
	// One of the prime invariants of Cockroach is that a mutating command
	// cannot write a key with an earlier timestamp than the most recent
	// read of the same key. So first order of business here is to check
	// the timestamp cache for reads/writes which are more recent than the
	// timestamp of this write. If more recent, we simply update the
	// write's timestamp before enqueuing it for execution. When the write
	// returns, the updated timestamp will inform the final commit
	// timestamp.
	r.Lock() // Protect access to timestamp cache and read queue.
	if ts := r.tsCache.GetMax(header.Key, header.EndKey); header.Timestamp.Less(ts) {
		if glog.V(1) {
			glog.Infof("Overriding existing timestamp %s with %s", header.Timestamp, ts)
		}
		ts.Logical++ // increment logical component by one to differentiate.
		// Update the request timestamp.
		header.Timestamp = ts
	}
	// Just as for reads, we update the timestamp cache with the
	// timestamp of this write. This ensures a strictly higher timestamp
	// for successive writes to the same key or key range.
	r.tsCache.Add(header.Key, header.EndKey, header.Timestamp)
	// The next step is to add the write to the read queue to inform
	// subsequent reads that there is a pending write. Reads which
	// overlap pending writes must wait for those writes to complete.
	wKey := r.readQ.AddWrite(header.Key, header.EndKey)
	r.Unlock()
	// Create command and enqueue for Raft.
	cmd := &Cmd{
		Method: method,
		Args:   args,
		Reply:  reply,
		done:   make(chan error, 1),
	}
	// This waits for the command to complete.
	err := r.EnqueueCmd(cmd)
	// Now that the command has completed, remove the pending write.
	r.Lock()
	r.readQ.RemoveWrite(wKey)
	r.Unlock()
	return err
}
Example 4: usesTimestampCache
// usesTimestampCache returns true if the request affects or is
// affected by the timestamp cache.
func usesTimestampCache(r proto.Request) bool {
	m := r.Method()
	if m < 0 || m >= proto.Method(len(tsCacheMethods)) {
		return false
	}
	return tsCacheMethods[m]
}
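Example 4 relies on tsCacheMethods, a method-indexed table that is not shown on this page. The standalone sketch below reproduces the same bounds-checked lookup with a hypothetical Method enum and table contents; the real table lives in the storage package and covers every proto method, so the entries here are assumptions for illustration only.

package main

import "fmt"

// Method is a stand-in for proto.Method: a small integer enum.
type Method int

const (
	Get Method = iota
	Put
	Scan
	AdminSplit
)

// tsCacheMethods is a hypothetical table marking which methods interact
// with the timestamp cache; the entries are illustrative, not the
// actual CockroachDB list.
var tsCacheMethods = [...]bool{
	Get:        true,
	Put:        true,
	Scan:       true,
	AdminSplit: false,
}

// usesTimestampCache mirrors the bounds check in Example 4: out-of-range
// methods simply report false instead of panicking on the index.
func usesTimestampCache(m Method) bool {
	if m < 0 || m >= Method(len(tsCacheMethods)) {
		return false
	}
	return tsCacheMethods[m]
}

func main() {
	fmt.Println(usesTimestampCache(Put))        // true
	fmt.Println(usesTimestampCache(Method(99))) // false
}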
Example 5: maybeWrap
// maybeWrap wraps the given argument in a batch, unless it is already one.
func maybeWrap(args proto.Request) (*proto.BatchRequest, func(*proto.BatchResponse) proto.Response) {
	if ba, ok := args.(*proto.BatchRequest); ok {
		return ba, func(br *proto.BatchResponse) proto.Response { return br }
	}
	ba := &proto.BatchRequest{}
	ba.RequestHeader = *(gogoproto.Clone(args.Header()).(*proto.RequestHeader))
	ba.Add(args)
	return ba, func(br *proto.BatchResponse) proto.Response {
		var unwrappedReply proto.Response
		if len(br.Responses) == 0 {
			unwrappedReply = args.CreateReply()
		} else {
			unwrappedReply = br.Responses[0].GetInner()
		}
		// The ReplyTxn is propagated from one response to the next request,
		// and we adopt the mechanism that whenever the Txn changes, it needs
		// to be set in the reply, for example to ratchet up the transaction
		// timestamp on writes when necessary.
		// This is internally necessary to sequentially execute the batch,
		// so it makes some sense to take the burden of updating the Txn
		// from TxnCoordSender - it will only need to act on retries/aborts
		// in the future.
		unwrappedReply.Header().Txn = br.Txn
		if unwrappedReply.Header().Error == nil {
			unwrappedReply.Header().Error = br.Error
		}
		return unwrappedReply
	}
}
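The wrap-then-unwrap shape of maybeWrap (return the batch plus a closure that later extracts the single response) is a pattern worth seeing in isolation. The sketch below uses invented Req, Resp and Batch types to show just that mechanic; it is not the CockroachDB API.

package main

import "fmt"

// Req and Resp stand in for proto.Request and proto.Response.
type Req struct{ Key string }
type Resp struct{ Value string }

// Batch and BatchResp stand in for proto.BatchRequest / proto.BatchResponse.
type Batch struct{ Reqs []Req }
type BatchResp struct{ Resps []Resp }

// maybeWrap puts a single request into a batch and returns an unwrap
// closure that pulls the matching single response back out, mirroring
// the structure of Example 5.
func maybeWrap(r Req) (*Batch, func(*BatchResp) Resp) {
	b := &Batch{Reqs: []Req{r}}
	return b, func(br *BatchResp) Resp {
		if len(br.Resps) == 0 {
			// Fall back to an empty reply, analogous to args.CreateReply().
			return Resp{}
		}
		return br.Resps[0]
	}
}

func main() {
	b, unwrap := maybeWrap(Req{Key: "a"})
	_ = b // a real sender would execute the batch here
	// Pretend some sender executed the batch and produced a response.
	br := &BatchResp{Resps: []Resp{{Value: "hello"}}}
	fmt.Println(unwrap(br).Value) // prints "hello"
}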
Example 6: updateForBatch
// updateForBatch updates the first argument (the header of a request contained
// in a batch) from the second one (the batch header), returning an error when
// inconsistencies are found.
// It is checked that the individual call does not have a User, UserPriority
// or Txn set that differs from the batch's.
func updateForBatch(args proto.Request, bHeader proto.RequestHeader) error {
	// Disallow transaction, user and priority on individual calls, unless
	// equal.
	aHeader := args.Header()
	if aHeader.User != "" && aHeader.User != bHeader.User {
		return util.Error("conflicting user on call in batch")
	}
	if aPrio := aHeader.GetUserPriority(); aPrio != proto.Default_RequestHeader_UserPriority && aPrio != bHeader.GetUserPriority() {
		return util.Error("conflicting user priority on call in batch")
	}
	aHeader.User = bHeader.User
	aHeader.UserPriority = bHeader.UserPriority
	// Only allow individual transactions on the requests of a batch if
	// - the batch is non-transactional,
	// - the individual transaction does not write intents, and
	// - the individual transaction is initialized.
	// The main usage of this is to allow mass-resolution of intents, which
	// entails sending a non-txn batch of transactional InternalResolveIntent.
	if aHeader.Txn != nil && !aHeader.Txn.Equal(bHeader.Txn) {
		if len(aHeader.Txn.ID) == 0 || proto.IsTransactionWrite(args) || bHeader.Txn != nil {
			return util.Error("conflicting transaction in transactional batch")
		}
	} else {
		aHeader.Txn = bHeader.Txn
	}
	return nil
}
Example 7: ExecuteCmd
// ExecuteCmd synchronously runs Store.ExecuteCmd. The store is looked
// up from the store map if specified by header.Replica; otherwise,
// the command is being executed locally, and the replica is
// determined via lookup through each of the stores.
func (kv *LocalKV) ExecuteCmd(method string, args proto.Request, replyChan interface{}) {
	// If the replica isn't specified in the header, look it up.
	var err error
	var store *storage.Store
	// If we aren't given a Replica, we have to bend over backwards a
	// little here. We need to find the Store, but all we have is the
	// Key. So find its Range locally. This lets us use the same
	// codepath below (store.ExecuteCmd) for both locally and remotely
	// originated commands.
	header := args.Header()
	if header.Replica.StoreID == 0 {
		var repl *proto.Replica
		repl, err = kv.lookupReplica(header.Key, header.EndKey)
		if err == nil {
			header.Replica = *repl
		}
	}
	if err == nil {
		store, err = kv.GetStore(header.Replica.StoreID)
	}
	reply := reflect.New(reflect.TypeOf(replyChan).Elem().Elem()).Interface().(proto.Response)
	if err != nil {
		reply.Header().SetGoError(err)
	} else {
		store.ExecuteCmd(method, args, reply)
		if err := reply.Verify(args); err != nil {
			reply.Header().SetGoError(err)
		}
	}
	reflect.ValueOf(replyChan).Send(reflect.ValueOf(reply))
}
Example 8: executeCmd
// executeCmd looks up the store specified by header.Replica, and runs
// Store.ExecuteCmd.
func (n *Node) executeCmd(method string, args proto.Request, reply proto.Response) error {
	store, err := n.localKV.GetStore(&args.Header().Replica)
	if err != nil {
		return err
	}
	store.ExecuteCmd(method, args, reply)
	return nil
}
Example 9: endCmd
// endCmd removes a pending command from the command queue.
func (r *Range) endCmd(cmdKey interface{}, args proto.Request, err error, readOnly bool) {
	r.Lock()
	if err == nil && usesTimestampCache(args) {
		header := args.Header()
		r.tsCache.Add(header.Key, header.EndKey, header.Timestamp, header.Txn.GetID(), readOnly)
	}
	r.cmdQ.Remove(cmdKey)
	r.Unlock()
}
Example 10: applyRaftCommand
// applyRaftCommand applies a raft command from the replicated log to the
// underlying state machine (i.e. the engine).
// When certain critical operations fail, a replicaCorruptionError may be
// returned and must be handled by the caller.
func (r *Range) applyRaftCommand(ctx context.Context, index uint64, originNode proto.RaftNodeID, args proto.Request, reply proto.Response) error {
	if index <= 0 {
		log.Fatalc(ctx, "raft command index is <= 0")
	}
	// If we have an out of order index, there's corruption. No sense in trying
	// to update anything or run the command. Simply return a corruption error.
	if oldIndex := atomic.LoadUint64(&r.appliedIndex); oldIndex >= index {
		return newReplicaCorruptionError(util.Errorf("applied index moved backwards: %d >= %d", oldIndex, index))
	}
	// Call the helper, which returns a batch containing data written
	// during command execution and any associated error.
	ms := engine.MVCCStats{}
	batch, rErr := r.applyRaftCommandInBatch(ctx, index, originNode, args, reply, &ms)
	// ALWAYS set the reply header error to the error returned by the
	// helper. This is the definitive result of the execution. The
	// error must be set before saving to the response cache.
	// TODO(tschottdorf,tamird) For #1400, want to refactor executeCmd to not
	// touch the reply header's error field.
	reply.Header().SetGoError(rErr)
	defer batch.Close()
	// Advance the last applied index and commit the batch.
	if err := setAppliedIndex(batch, r.Desc().RaftID, index); err != nil {
		log.Fatalc(ctx, "setting applied index in a batch should never fail: %s", err)
	}
	if err := batch.Commit(); err != nil {
		rErr = newReplicaCorruptionError(util.Errorf("could not commit batch"), err, rErr)
	} else {
		// Update cached appliedIndex if we were able to set the applied index on disk.
		atomic.StoreUint64(&r.appliedIndex, index)
	}
	// On successful write commands, flush to event feed, and handle other
	// write-related triggers including splitting and config gossip updates.
	if rErr == nil && proto.IsWrite(args) {
		// Publish update to event feed.
		r.rm.EventFeed().updateRange(r, args.Method(), &ms)
		// If the commit succeeded, potentially add range to split queue.
		r.maybeAddToSplitQueue()
		// Maybe update gossip configs on a put.
		switch args.(type) {
		case *proto.PutRequest, *proto.DeleteRequest, *proto.DeleteRangeRequest:
			if key := args.Header().Key; key.Less(keys.SystemMax) {
				// We hold the lock already.
				r.maybeGossipConfigsLocked(func(configPrefix proto.Key) bool {
					return bytes.HasPrefix(key, configPrefix)
				})
			}
		}
	}
	return rErr
}
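The applied-index handling in Example 10 (reject anything at or below the cached index, then advance it only after a successful commit) can be reduced to a small atomic load/store sketch. The types, method names, and error text below are illustrative, not the storage package's.

package main

import (
	"fmt"
	"sync/atomic"
)

// replica holds only the piece of state Example 10 manipulates.
type replica struct {
	appliedIndex uint64
}

// apply refuses out-of-order raft indexes and, on a (simulated)
// successful commit, advances the cached applied index, mirroring the
// Load/Store pair in applyRaftCommand.
func (r *replica) apply(index uint64, commit func() error) error {
	if old := atomic.LoadUint64(&r.appliedIndex); old >= index {
		return fmt.Errorf("applied index moved backwards: %d >= %d", old, index)
	}
	if err := commit(); err != nil {
		return err
	}
	atomic.StoreUint64(&r.appliedIndex, index)
	return nil
}

func main() {
	r := &replica{}
	fmt.Println(r.apply(1, func() error { return nil })) // <nil>
	fmt.Println(r.apply(1, func() error { return nil })) // out-of-order error
}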
Example 11: updateForBatch
// updateForBatch updates the first argument (the header of a request contained
// in a batch) from the second one (the batch header), returning an error when
// inconsistencies are found.
// It is checked that the individual call does not have a UserPriority
// or Txn set that differs from the batch's.
// TODO(tschottdorf): will go with #2143.
func updateForBatch(args proto.Request, bHeader proto.RequestHeader) error {
	// Disallow transaction, user and priority on individual calls, unless
	// equal.
	aHeader := args.Header()
	if aPrio := aHeader.GetUserPriority(); aPrio != proto.Default_RequestHeader_UserPriority && aPrio != bHeader.GetUserPriority() {
		return util.Errorf("conflicting user priority on call in batch")
	}
	aHeader.UserPriority = bHeader.UserPriority
	aHeader.Txn = bHeader.Txn // reqs always take Txn from batch
	return nil
}
Example 12: addReadOnlyCmd
// addReadOnlyCmd updates the read timestamp cache and waits for any
// overlapping writes currently processing through Raft ahead of us to
// clear via the read queue.
func (r *Range) addReadOnlyCmd(ctx context.Context, args proto.Request, reply proto.Response) error {
	header := args.Header()
	if err := r.checkCmdHeader(header); err != nil {
		reply.Header().SetGoError(err)
		return err
	}
	// If read-consistency is set to INCONSISTENT, run directly.
	if header.ReadConsistency == proto.INCONSISTENT {
		// But disallow any inconsistent reads within txns.
		if header.Txn != nil {
			reply.Header().SetGoError(util.Error("cannot allow inconsistent reads within a transaction"))
			return reply.Header().GoError()
		}
		if header.Timestamp.Equal(proto.ZeroTimestamp) {
			header.Timestamp = r.rm.Clock().Now()
		}
		intents, err := r.executeCmd(r.rm.Engine(), nil, args, reply)
		if err == nil {
			r.handleSkippedIntents(args, intents)
		}
		return err
	} else if header.ReadConsistency == proto.CONSENSUS {
		reply.Header().SetGoError(util.Error("consensus reads not implemented"))
		return reply.Header().GoError()
	}
	// Add the read to the command queue to gate subsequent
	// overlapping commands until this command completes.
	cmdKey := r.beginCmd(header, true)
	// This replica must have leader lease to process a consistent read.
	if err := r.redirectOnOrAcquireLeaderLease(tracer.FromCtx(ctx), header.Timestamp); err != nil {
		r.endCmd(cmdKey, args, err, true /* readOnly */)
		reply.Header().SetGoError(err)
		return err
	}
	// Execute read-only command.
	intents, err := r.executeCmd(r.rm.Engine(), nil, args, reply)
	// Only update the timestamp cache if the command succeeded.
	r.endCmd(cmdKey, args, err, true /* readOnly */)
	if err == nil {
		r.handleSkippedIntents(args, intents)
	}
	return err
}
Example 13: sendAttempt
// sendAttempt is invoked by Send. It temporarily truncates the arguments to
// match the descriptor's EndKey (if necessary) and gathers and rearranges the
// replicas before making a single attempt at sending the request. It returns
// the result of sending the RPC; a potential error contained in the reply has
// to be handled separately by the caller.
func (ds *DistSender) sendAttempt(trace *tracer.Trace, args proto.Request, reply proto.Response, desc *proto.RangeDescriptor) error {
	defer trace.Epoch("sending RPC")()
	// Truncate the request to our current range, making sure not to
	// touch it unless we have to (it is illegal to send EndKey on
	// commands which do not operate on ranges).
	if endKey := args.Header().EndKey; endKey != nil && !endKey.Less(desc.EndKey) {
		defer func(k proto.Key) { args.Header().EndKey = k }(endKey)
		args.Header().EndKey = desc.EndKey
	}
	leader := ds.leaderCache.Lookup(proto.RaftID(desc.RaftID))
	// Try to send the call.
	replicas := newReplicaSlice(ds.gossip, desc)
	// Rearrange the replicas so that those replicas with long common
	// prefix of attributes end up first. If there's no prefix, this is a
	// no-op.
	order := ds.optimizeReplicaOrder(replicas)
	// If this request needs to go to a leader and we know who that is, move
	// it to the front.
	if !(proto.IsRead(args) && args.Header().ReadConsistency == proto.INCONSISTENT) &&
		leader.StoreID > 0 {
		if i := replicas.FindReplica(leader.StoreID); i >= 0 {
			replicas.MoveToFront(i)
			order = rpc.OrderStable
		}
	}
	return ds.sendRPC(trace, desc.RaftID, replicas, order, args, reply)
}
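One detail worth calling out in Example 13 is the deferred closure that receives the saved EndKey as an argument, so the original value is captured by value and restored when sendAttempt returns. The standalone sketch below demonstrates only that save-and-restore trick with invented names (truncate, endKey, limit); it is not CockroachDB code.

package main

import "fmt"

// truncate temporarily clamps *endKey to limit for the duration of the
// call, restoring the original value on return. Passing the saved value
// as an argument to the deferred closure captures it by value, the same
// trick sendAttempt uses to restore args.Header().EndKey.
func truncate(endKey *string, limit string, send func()) {
	if *endKey > limit {
		defer func(saved string) { *endKey = saved }(*endKey)
		*endKey = limit
	}
	send()
}

func main() {
	key := "zzz"
	truncate(&key, "mmm", func() { fmt.Println("sending with EndKey =", key) })
	fmt.Println("after the call, EndKey is restored to", key)
}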
Example 14: CallComplete
// CallComplete is called by a node whenever it completes a request. This will
// publish an appropriate event to the feed based on the results of the call.
func (nef NodeEventFeed) CallComplete(args proto.Request, reply proto.Response) {
	if err := reply.Header().Error; err != nil &&
		err.CanRestartTransaction() == proto.TransactionRestart_ABORT {
		nef.f.Publish(&CallErrorEvent{
			NodeID: nef.id,
			Method: args.Method(),
		})
	} else {
		nef.f.Publish(&CallSuccessEvent{
			NodeID: nef.id,
			Method: args.Method(),
		})
	}
}
Example 15: addAdminCmd
// addAdminCmd executes the command directly. There is no interaction
// with the command queue or the timestamp cache, as admin commands
// are not meant to consistently access or modify the underlying data.
// Admin commands must run on the leader replica.
func (r *Range) addAdminCmd(ctx context.Context, args proto.Request, reply proto.Response) error {
	// Admin commands always require the leader lease.
	if err := r.redirectOnOrAcquireLeaderLease(args.Header().Timestamp); err != nil {
		reply.Header().SetGoError(err)
		return err
	}
	switch args.(type) {
	case *proto.AdminSplitRequest:
		r.AdminSplit(args.(*proto.AdminSplitRequest), reply.(*proto.AdminSplitResponse))
	case *proto.AdminMergeRequest:
		r.AdminMerge(args.(*proto.AdminMergeRequest), reply.(*proto.AdminMergeResponse))
	default:
		return util.Error("unrecognized admin command")
	}
	return reply.Header().GoError()
}