本文整理匯總了Golang中github.com/cockroachdb/cockroach/pkg/util/stop.NewStopper函數的典型用法代碼示例。如果您正苦於以下問題:Golang NewStopper函數的具體用法?Golang NewStopper怎麽用?Golang NewStopper使用的例子?那麽, 這裏精選的函數代碼示例或許可以為您提供幫助。
在下文中一共展示了NewStopper函數的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Golang代碼示例。
示例1: TestRangeIterForward
// TestRangeIterForward walks a RangeIterator in ascending order across the
// alpha test range descriptors and verifies each descriptor is returned in
// the expected sequence.
func TestRangeIterForward(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()

	g, clock := makeGossip(t, stopper)
	sender := NewDistSender(DistSenderConfig{
		Clock:             clock,
		RangeDescriptorDB: alphaRangeDescriptorDB,
	}, g)

	ctx := context.Background()
	iter := NewRangeIterator(sender)
	scan := roachpb.RSpan{
		Key:    roachpb.RKey(roachpb.KeyMin),
		EndKey: roachpb.RKey([]byte("z")),
	}

	idx := 0
	for iter.Seek(ctx, scan.Key, Ascending); iter.Valid(); iter.Next(ctx) {
		if !reflect.DeepEqual(alphaRangeDescriptors[idx], iter.Desc()) {
			t.Fatalf("%d: expected %v; got %v", idx, alphaRangeDescriptors[idx], iter.Desc())
		}
		idx++
		// Stop once the span no longer extends past the current range.
		if !iter.NeedAnother(scan) {
			break
		}
	}
}
示例2: TestRangeStatsInit
// TestRangeStatsInit writes a fully-populated MVCCStats for range 1 and
// verifies that reading the stats back yields an identical struct.
func TestRangeStatsInit(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	stopper := stop.NewStopper()
	defer stopper.Stop()
	tc.Start(t, stopper)

	ctx := context.Background()
	written := enginepb.MVCCStats{
		LiveBytes:       1,
		KeyBytes:        2,
		ValBytes:        3,
		IntentBytes:     4,
		LiveCount:       5,
		KeyCount:        6,
		ValCount:        7,
		IntentCount:     8,
		IntentAge:       9,
		GCBytesAge:      10,
		LastUpdateNanos: 11,
	}
	if err := engine.MVCCSetRangeStats(ctx, tc.engine, 1, &written); err != nil {
		t.Fatal(err)
	}

	read, err := engine.MVCCGetRangeStats(ctx, tc.engine, tc.repl.RangeID)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(written, read) {
		t.Errorf("mvcc stats mismatch %+v != %+v", written, read)
	}
}
示例3: TestStopper
// TestStopper exercises the basic stop sequence: Stop must block until a
// running worker exits, and only then return.
func TestStopper(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := stop.NewStopper()
	workerGate := make(chan struct{}) // released to let the worker finish
	stopReturned := make(chan struct{})
	checkerDone := make(chan struct{})

	s.RunWorker(func() {
		<-workerGate
	})

	go func() {
		<-s.ShouldStop()
		// Stop must not have returned while the worker is still blocked.
		select {
		case <-stopReturned:
			t.Fatal("expected stopper to have blocked")
		case <-time.After(1 * time.Millisecond):
			// Expected.
		}
		// Release the worker; Stop should now be able to complete.
		close(workerGate)
		select {
		case <-stopReturned:
			// Success.
		case <-time.After(100 * time.Millisecond):
			t.Fatal("stopper should have finished waiting")
		}
		close(checkerDone)
	}()

	s.Stop()
	close(stopReturned)
	<-checkerDone
}
示例4: TestSpanStatsGRPCResponse
// TestSpanStatsGRPCResponse issues a SpanStats RPC covering the full key
// space against a test server and checks that the reported range count
// matches the expected initial range count.
func TestSpanStatsGRPCResponse(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ts := startServer(t)
	defer ts.Stopper().Stop()

	// A dedicated RPC context (with its own stopper) to dial the server.
	rpcStopper := stop.NewStopper()
	defer rpcStopper.Stop()
	rpcCtx := rpc.NewContext(log.AmbientContext{}, ts.RPCContext().Config, ts.Clock(), rpcStopper)

	conn, err := rpcCtx.GRPCDial(ts.ServingAddr())
	if err != nil {
		t.Fatal(err)
	}

	req := serverpb.SpanStatsRequest{
		NodeID:   "1",
		StartKey: []byte(roachpb.RKeyMin),
		EndKey:   []byte(roachpb.RKeyMax),
	}
	resp, err := serverpb.NewStatusClient(conn).SpanStats(context.Background(), &req)
	if err != nil {
		t.Fatal(err)
	}
	if a, e := int(resp.RangeCount), ExpectedInitialRangeCount(); a != e {
		t.Errorf("expected %d ranges, found %d", e, a)
	}
}
示例5: runDebugRangeData
// runDebugRangeData prints every key/value pair belonging to a single range.
// It expects exactly two positional arguments: the store directory and the
// range ID. Whether only replicated keys are included is controlled by
// debugCtx.replicated.
func runDebugRangeData(cmd *cobra.Command, args []string) error {
	// Validate arguments before acquiring any resources; the original code
	// created the stopper first, allocating for nothing on bad usage.
	if len(args) != 2 {
		return errors.New("two arguments required: dir range_id")
	}

	stopper := stop.NewStopper()
	defer stopper.Stop()

	db, err := openStore(cmd, args[0], stopper)
	if err != nil {
		return err
	}
	rangeID, err := parseRangeID(args[1])
	if err != nil {
		return err
	}
	desc, err := loadRangeDescriptor(db, rangeID)
	if err != nil {
		return err
	}

	// Walk the range's data and print each key/value pair; abort on the
	// first print error, otherwise surface any iteration error at the end.
	iter := storage.NewReplicaDataIterator(&desc, db, debugCtx.replicated)
	for ; iter.Valid(); iter.Next() {
		if _, err := printKeyValue(engine.MVCCKeyValue{
			Key:   iter.Key(),
			Value: iter.Value(),
		}); err != nil {
			return err
		}
	}
	return iter.Error()
}
示例6: TestStopperRunTaskPanic
// TestStopperRunTaskPanic ensures that a panic handler can recover panicking
// tasks, and that no tasks are leaked when they panic.
func TestStopperRunTaskPanic(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Every recovered panic value is forwarded on ch by the OnPanic handler,
	// so the test can confirm each panic was intercepted.
	ch := make(chan interface{})
	s := stop.NewStopper(stop.OnPanic(func(v interface{}) {
		ch <- v
	}))
	// If RunTask were not panic-safe, Stop() would deadlock.
	type testFn func()
	// explode panics with ch itself, giving a value the loop below can
	// recognize unambiguously.
	explode := func() { panic(ch) }
	// Exercise each task-running variant of the stopper in turn.
	for i, test := range []testFn{
		func() {
			_ = s.RunTask(explode)
		},
		func() {
			_ = s.RunAsyncTask(context.Background(), func(_ context.Context) { explode() })
		},
		func() {
			_ = s.RunLimitedAsyncTask(
				context.Background(),
				make(chan struct{}, 1),
				true, /* wait */
				func(_ context.Context) { explode() },
			)
		},
		func() {
			s.RunWorker(explode)
		},
	} {
		// Run the variant in its own goroutine and block until its panic is
		// routed through the OnPanic handler.
		go test()
		recovered := <-ch
		if recovered != ch {
			t.Errorf("%d: unexpected recovered value: %+v", i, recovered)
		}
	}
}
示例7: TestClientDisconnectRedundant
// TestClientDisconnectRedundant verifies that the gossip server
// will drop an outgoing client connection that is already an
// inbound client connection of another node.
func TestClientDisconnectRedundant(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	local := startGossip(1, stopper, t, metric.NewRegistry())
	remote := startGossip(2, stopper, t, metric.NewRegistry())
	// startClient requires locks are held, so acquire here.
	local.mu.Lock()
	remote.mu.Lock()
	rAddr := remote.mu.is.NodeAddr
	lAddr := local.mu.is.NodeAddr
	// Connect each node to the other, making one of the two connections
	// redundant by construction.
	local.startClient(&rAddr, remote.NodeID.Get())
	remote.startClient(&lAddr, local.NodeID.Get())
	local.mu.Unlock()
	remote.mu.Unlock()
	local.manage()
	remote.manage()
	util.SucceedsSoon(t, func() error {
		// Check which of the clients is connected to the other.
		ok1 := local.findClient(func(c *client) bool { return c.addr.String() == rAddr.String() }) != nil
		ok2 := remote.findClient(func(c *client) bool { return c.addr.String() == lAddr.String() }) != nil
		// We expect node 2 to disconnect; if both are still connected,
		// it's possible that node 1 gossiped before node 2 connected, in
		// which case we have to gossip from node 1 to trigger the
		// disconnect redundant client code.
		if ok1 && ok2 {
			if err := local.AddInfo("local-key", nil, time.Second); err != nil {
				t.Fatal(err)
			}
		} else if ok1 && !ok2 && verifyServerMaps(local, 0) && verifyServerMaps(remote, 1) {
			// Success: only the local->remote client remains and the server
			// maps agree with that state.
			return nil
		}
		return errors.New("local client to remote not yet closed as redundant")
	})
}
示例8: TestClientDisallowMultipleConns
// TestClientDisallowMultipleConns verifies that the server disallows
// multiple connections from the same client node ID.
func TestClientDisallowMultipleConns(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	local := startGossip(1, stopper, t, metric.NewRegistry())
	remote := startGossip(2, stopper, t, metric.NewRegistry())
	// startClient requires the gossip mutexes to be held.
	local.mu.Lock()
	remote.mu.Lock()
	rAddr := remote.mu.is.NodeAddr
	// Start two clients from local to remote. RPC client cache is
	// disabled via the context, so we'll start two different outgoing
	// connections.
	local.startClient(&rAddr, remote.NodeID.Get())
	local.startClient(&rAddr, remote.NodeID.Get())
	local.mu.Unlock()
	remote.mu.Unlock()
	local.manage()
	remote.manage()
	util.SucceedsSoon(t, func() error {
		// Verify that the remote server has only a single incoming
		// connection and the local server has only a single outgoing
		// connection.
		local.mu.Lock()
		remote.mu.Lock()
		outgoing := local.outgoing.len()
		incoming := remote.mu.incoming.len()
		local.mu.Unlock()
		remote.mu.Unlock()
		if outgoing == 1 && incoming == 1 && verifyServerMaps(local, 0) && verifyServerMaps(remote, 1) {
			return nil
		}
		return errors.Errorf("incorrect number of incoming (%d) or outgoing (%d) connections", incoming, outgoing)
	})
}
示例9: startFakeServerGossips
// startFakeServerGossips creates a real local gossip instance plus a faked
// remote gossip server. The fake remote service exists only so tests can
// inspect the messages a client sends to it. The returned stopper owns both
// sides; callers are responsible for stopping it.
func startFakeServerGossips(
	t *testing.T, localNodeID roachpb.NodeID,
) (*Gossip, *fakeGossipServer, *stop.Stopper) {
	stopper := stop.NewStopper()

	// Real gossip instance on the local side.
	localRPCCtx := rpc.NewContext(log.AmbientContext{}, &base.Config{Insecure: true}, nil, stopper)
	localServer := rpc.NewServer(localRPCCtx)
	local := NewTest(localNodeID, localRPCCtx, localServer, nil, stopper, metric.NewRegistry())
	localLn, err := netutil.ListenAndServeGRPC(stopper, localServer, util.IsolatedTestAddr)
	if err != nil {
		t.Fatal(err)
	}
	local.start(localLn.Addr())

	// Faked gossip server on the remote side.
	remoteRPCCtx := rpc.NewContext(log.AmbientContext{}, &base.Config{Insecure: true}, nil, stopper)
	remoteServer := rpc.NewServer(remoteRPCCtx)
	remoteLn, err := netutil.ListenAndServeGRPC(stopper, remoteServer, util.IsolatedTestAddr)
	if err != nil {
		t.Fatal(err)
	}
	remote := newFakeGossipServer(remoteServer, stopper)
	addr := remoteLn.Addr()
	remote.nodeAddr = util.MakeUnresolvedAddr(addr.Network(), addr.String())

	return local, remote, stopper
}
示例10: TestClientGossip
// TestClientGossip verifies a client can gossip a delta to the server.
func TestClientGossip(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	local := startGossip(1, stopper, t, metric.NewRegistry())
	remote := startGossip(2, stopper, t, metric.NewRegistry())
	disconnected := make(chan *client, 1)
	c := newClient(log.AmbientContext{}, remote.GetNodeAddr(), makeMetrics())
	// On teardown, stop the stopper first and then expect the client to
	// report itself on the disconnected channel. This deferred func runs
	// before leaktest's AfterTest, so shutdown precedes leak checking.
	defer func() {
		stopper.Stop()
		if c != <-disconnected {
			t.Errorf("expected client disconnect after remote close")
		}
	}()
	// Seed each node with a key the other side should learn via gossip.
	if err := local.AddInfo("local-key", nil, time.Hour); err != nil {
		t.Fatal(err)
	}
	if err := remote.AddInfo("remote-key", nil, time.Hour); err != nil {
		t.Fatal(err)
	}
	// Retry until both keys are visible on both nodes (i.e. the delta has
	// been gossiped in each direction).
	gossipSucceedsSoon(t, stopper, disconnected, map[*client]*Gossip{
		c: local,
	}, func() error {
		if _, err := remote.GetInfo("local-key"); err != nil {
			return err
		}
		if _, err := local.GetInfo("remote-key"); err != nil {
			return err
		}
		return nil
	})
}
示例11: TestHeartbeatCB
// TestHeartbeatCB verifies that the client's HeartbeatCB is invoked after a
// successful heartbeat against a test heartbeat server.
func TestHeartbeatCB(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()

	clock := hlc.NewClock(time.Unix(0, 20).UnixNano, time.Nanosecond)
	serverCtx := newNodeTestContext(clock, stopper)
	srv, ln := newTestServer(t, serverCtx, true)
	RegisterHeartbeatServer(srv, &HeartbeatService{
		clock:              clock,
		remoteClockMonitor: serverCtx.RemoteClocks,
	})
	remoteAddr := ln.Addr().String()

	// Clocks don't matter in this test.
	clientCtx := newNodeTestContext(clock, stopper)
	heartbeatSeen := make(chan struct{})
	var once sync.Once
	// Close heartbeatSeen exactly once, no matter how often the callback
	// fires.
	clientCtx.HeartbeatCB = func() {
		once.Do(func() {
			close(heartbeatSeen)
		})
	}
	if _, err := clientCtx.GRPCDial(remoteAddr); err != nil {
		t.Fatal(err)
	}
	<-heartbeatSeen
}
示例12: TestScannerTiming
// TestScannerTiming verifies that ranges are scanned, regardless
// of how many, to match scanInterval.
func TestScannerTiming(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const count = 3
	const runTime = 100 * time.Millisecond
	const maxError = 7500 * time.Microsecond
	for i, interval := range []time.Duration{
		15 * time.Millisecond,
		25 * time.Millisecond,
	} {
		testutils.SucceedsSoon(t, func() error {
			ranges := newTestRangeSet(count, t)
			queue := &testQueue{}
			scanner := newReplicaScanner(log.AmbientContext{}, interval, 0, ranges)
			scanner.AddQueues(queue)
			manual := hlc.NewManualClock(123)
			clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
			stopper := stop.NewStopper()
			scanner.Start(clock, stopper)
			time.Sleep(runTime)
			stopper.Stop()

			avg := scanner.avgScan()
			log.Infof(context.Background(), "%d: average scan: %s", i, avg)
			// Fail if the measured average deviates from the target interval
			// by more than maxError in either direction.
			if avg.Nanoseconds()-interval.Nanoseconds() > maxError.Nanoseconds() ||
				interval.Nanoseconds()-avg.Nanoseconds() > maxError.Nanoseconds() {
				return errors.Errorf("expected %s, got %s: exceeds max error of %s", interval, avg, maxError)
			}
			return nil
		})
	}
}
示例13: TestClockOffsetMetrics
// TestClockOffsetMetrics seeds the remote clock monitor with a single known
// offset sample and checks the mean/stddev metrics it exposes.
func TestClockOffsetMetrics(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()

	clock := hlc.NewClock(hlc.NewManualClock(123).UnixNano, 20*time.Nanosecond)
	monitor := newRemoteClockMonitor(clock, time.Hour)
	// Inject one remote offset sample directly into the monitor's state.
	monitor.mu.offsets = map[string]RemoteOffset{
		"0": {
			Offset:      13,
			Uncertainty: 7,
			MeasuredAt:  6,
		},
	}
	if err := monitor.VerifyClockOffset(context.TODO()); err != nil {
		t.Fatal(err)
	}
	if got, want := monitor.Metrics().ClockOffsetMeanNanos.Value(), int64(13); got != want {
		t.Errorf("mean %d != expected %d", got, want)
	}
	if got, want := monitor.Metrics().ClockOffsetStdDevNanos.Value(), int64(7); got != want {
		t.Errorf("stdDev %d != expected %d", got, want)
	}
}
示例14: TestLeaseErrors
// ExtendLease and ReleaseLease errors should not, by themselves, cause the
// migration process to fail. Not being able to acquire the lease should, but
// we don't test that here due to the added code that would be needed to change
// its retry settings to allow for testing it in a reasonable amount of time.
func TestLeaseErrors(t *testing.T) {
	db := &fakeDB{kvs: make(map[string][]byte)}
	mgr := Manager{
		stopper: stop.NewStopper(),
		leaseManager: &fakeLeaseManager{
			extendErr:  fmt.Errorf("context deadline exceeded"),
			releaseErr: fmt.Errorf("context deadline exceeded"),
		},
		db: db,
	}
	defer mgr.stopper.Stop()

	mig := noopMigration1
	backwardCompatibleMigrations = []migrationDescriptor{mig}
	if err := mgr.EnsureMigrations(context.Background()); err != nil {
		t.Error(err)
	}
	// Despite the lease extend/release errors, the migration must have
	// recorded its completion key — and nothing else.
	if _, ok := db.kvs[string(migrationKey(mig))]; !ok {
		t.Errorf("expected key %s to be written, but it wasn't", migrationKey(mig))
	}
	if len(db.kvs) != len(backwardCompatibleMigrations) {
		t.Errorf("expected %d key to be written, but %d were",
			len(backwardCompatibleMigrations), len(db.kvs))
	}
}
示例15: TestClientForwardUnresolved
// TestClientForwardUnresolved verifies that a client does not resolve a forward
// address prematurely.
func TestClientForwardUnresolved(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()

	const nodeID = 1
	local := startGossip(nodeID, stopper, t, metric.NewRegistry())
	localAddr := local.GetNodeAddr()
	c := newClient(log.AmbientContext{}, localAddr, makeMetrics()) // never started

	// The forward address deliberately stays unresolved.
	forward := util.UnresolvedAddr{
		NetworkField: "tcp",
		AddressField: "localhost:2345",
	}
	resp := &Response{
		NodeID:          nodeID,
		Addr:            *localAddr,
		AlternateNodeID: nodeID + 1,
		AlternateAddr:   &forward,
	}

	// handleResponse should surface the forward as an error without having
	// resolved the alternate address.
	if err := c.handleResponse(
		context.TODO(), local, resp,
	); !testutils.IsError(err, "received forward") {
		t.Fatal(err)
	}
	if !proto.Equal(c.forwardAddr, &forward) {
		t.Fatalf("unexpected forward address %v, expected %v", c.forwardAddr, &forward)
	}
}