本文整理汇总了Golang中github.com/cockroachdb/cockroach/pkg/util/stop.Stopper.ShouldQuiesce方法的典型用法代码示例。如果您正苦于以下问题:Golang Stopper.ShouldQuiesce方法的具体用法?Golang Stopper.ShouldQuiesce怎么用?Golang Stopper.ShouldQuiesce使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类github.com/cockroachdb/cockroach/pkg/util/stop.Stopper
的用法示例。
在下文中一共展示了Stopper.ShouldQuiesce方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: startBankTransfers
// startBankTransfers issues random balance transfers against bench.bank in a
// tight loop until the stopper signals quiescence, generating write traffic
// for tests.
func startBankTransfers(t testing.TB, stopper *stop.Stopper, sqlDB *gosql.DB, numAccounts int) {
	const maxTransfer = 999
	const update = `UPDATE bench.bank
SET balance = CASE id WHEN $1 THEN balance-$3 WHEN $2 THEN balance+$3 END
WHERE id IN ($1, $2)`
	for {
		// Bail out promptly once shutdown has been requested.
		select {
		case <-stopper.ShouldQuiesce():
			return // All done.
		default:
		}
		src := rand.Intn(numAccounts)
		dst := rand.Intn(numAccounts - 1)
		// dst is at most numAccounts-2 here, so remapping a collision to the
		// last account always yields dst != src.
		for src == dst {
			dst = numAccounts - 1
		}
		amount := rand.Intn(maxTransfer)
		util.SucceedsSoon(t, func() error {
			select {
			case <-stopper.ShouldQuiesce():
				return nil // All done.
			default:
			}
			_, err := sqlDB.Exec(update, src, dst, amount)
			return err
		})
	}
}
示例2: NewContext
// NewContext creates an rpc Context with the supplied values.
// NewContext creates an rpc Context with the supplied values.
func NewContext(
	ambient log.AmbientContext, baseCtx *base.Config, hlcClock *hlc.Clock, stopper *stop.Stopper,
) *Context {
	rctx := &Context{Config: baseCtx}

	// Fall back to a wall-clock-backed HLC when no clock is supplied.
	if hlcClock == nil {
		rctx.localClock = hlc.NewClock(hlc.UnixNano)
	} else {
		rctx.localClock = hlcClock
	}
	rctx.breakerClock = breakerClock{clock: rctx.localClock}

	masterCtx, cancel := context.WithCancel(ambient.AnnotateCtx(context.Background()))
	rctx.masterCtx = masterCtx
	rctx.Stopper = stopper
	rctx.RemoteClocks = newRemoteClockMonitor(
		rctx.masterCtx, rctx.localClock, 10*defaultHeartbeatInterval)
	rctx.HeartbeatInterval = defaultHeartbeatInterval
	rctx.HeartbeatTimeout = 2 * defaultHeartbeatInterval
	rctx.conns.cache = make(map[string]*connMeta)

	// On quiescence: cancel the master context, then mark every cached
	// connection unavailable and remove it.
	stopper.RunWorker(func() {
		<-stopper.ShouldQuiesce()
		cancel()
		rctx.conns.Lock()
		defer rctx.conns.Unlock()
		for key, meta := range rctx.conns.cache {
			meta.Do(func() {
				// Make sure initialization is not in progress when we're
				// removing the conn. We need to set the error in case we win
				// the race against the real initialization code.
				if meta.err == nil {
					meta.err = &roachpb.NodeUnavailableError{}
				}
			})
			rctx.removeConnLocked(key, meta)
		}
	})
	return rctx
}
示例3: ListenAndServeGRPC
// ListenAndServeGRPC creates a listener and serves the specified grpc Server
// on it, closing the listener when signalled by the stopper.
// ListenAndServeGRPC creates a listener and serves the specified grpc Server
// on it, closing the listener when signalled by the stopper.
func ListenAndServeGRPC(
	stopper *stop.Stopper, server *grpc.Server, addr net.Addr,
) (net.Listener, error) {
	lis, err := net.Listen(addr.Network(), addr.String())
	if err != nil {
		return lis, err
	}

	// Two-phase teardown: closing the listener on quiesce stops new
	// connections; the grpc server itself is only stopped once a full
	// stop is requested.
	stopper.RunWorker(func() {
		<-stopper.ShouldQuiesce()
		FatalIfUnexpected(lis.Close())
		<-stopper.ShouldStop()
		server.Stop()
	})

	// Serve until the listener is closed by the worker above.
	stopper.RunWorker(func() {
		FatalIfUnexpected(server.Serve(lis))
	})

	return lis, nil
}
示例4: InitSenderForLocalTestCluster
// InitSenderForLocalTestCluster initializes a TxnCoordSender that can be used
// with LocalTestCluster.
// InitSenderForLocalTestCluster initializes a TxnCoordSender that can be used
// with LocalTestCluster.
func InitSenderForLocalTestCluster(
	nodeDesc *roachpb.NodeDescriptor,
	tracer opentracing.Tracer,
	clock *hlc.Clock,
	latency time.Duration,
	stores client.Sender,
	stopper *stop.Stopper,
	gossip *gossip.Gossip,
) client.Sender {
	// Abort retries once the stopper begins quiescing.
	retryOptions := base.DefaultRetryOptions()
	retryOptions.Closer = stopper.ShouldQuiesce()

	// Wrap the sender transport so every request goes through
	// localTestClusterTransport with the configured latency.
	inner := SenderTransportFactory(tracer, stores)
	makeTransport := func(
		opts SendOptions,
		rpcContext *rpc.Context,
		replicas ReplicaSlice,
		args roachpb.BatchRequest,
	) (Transport, error) {
		transport, err := inner(opts, rpcContext, replicas, args)
		if err != nil {
			return nil, err
		}
		return &localTestClusterTransport{transport, latency}, nil
	}

	distSender := NewDistSender(DistSenderConfig{
		Clock:            clock,
		RPCRetryOptions:  &retryOptions,
		nodeDescriptor:   nodeDesc,
		TransportFactory: makeTransport,
	}, gossip)

	return NewTxnCoordSender(
		log.AmbientContext{Tracer: tracer},
		distSender,
		clock,
		false, /* !linearizable */
		stopper,
		MakeTxnMetrics(metric.TestSampleInterval),
	)
}
示例5: StartHeartbeat
// StartHeartbeat starts a periodic heartbeat to refresh this node's
// last heartbeat in the node liveness table.
func (nl *NodeLiveness) StartHeartbeat(ctx context.Context, stopper *stop.Stopper) {
log.VEventf(ctx, 1, "starting liveness heartbeat")
retryOpts := base.DefaultRetryOptions()
retryOpts.Closer = stopper.ShouldQuiesce()
stopper.RunWorker(func() {
ambient := nl.ambientCtx
ambient.AddLogTag("hb", nil)
ticker := time.NewTicker(nl.heartbeatInterval)
defer ticker.Stop()
for {
if !nl.pauseHeartbeat.Load().(bool) {
ctx, sp := ambient.AnnotateCtxWithSpan(context.Background(), "heartbeat")
ctx, cancel := context.WithTimeout(ctx, nl.heartbeatInterval)
// Retry heartbeat in the event the conditional put fails.
for r := retry.StartWithCtx(ctx, retryOpts); r.Next(); {
liveness, err := nl.Self()
if err != nil && err != ErrNoLivenessRecord {
log.Errorf(ctx, "unexpected error getting liveness: %v", err)
}
if err := nl.Heartbeat(ctx, liveness); err != nil {
if err == errSkippedHeartbeat {
continue
}
log.Errorf(ctx, "failed liveness heartbeat: %v", err)
}
break
}
cancel()
sp.Finish()
}
select {
case <-ticker.C:
case <-stopper.ShouldStop():
return
}
}
})
}