This article collects typical usage examples of the SucceedsSoon function from the Golang package github.com/cockroachdb/cockroach/pkg/testutils. If you are wondering what SucceedsSoon does, how to call it, or what real-world callers look like, the curated examples below should help.
A total of 15 SucceedsSoon code examples are shown, ordered by popularity by default.
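Before looking at the examples, a minimal sketch of the calling pattern may be useful. SucceedsSoon takes a testing.TB and a closure returning error, and keeps retrying the closure until it returns nil or a deadline expires, failing the test otherwise. The helper and test below are a hypothetical, self-contained reimplementation of that idea (the deadline, sleep interval, and all names here are assumptions, not the CockroachDB implementation):

package example

import (
	"errors"
	"testing"
	"time"
)

// succeedsSoon is a stand-in for testutils.SucceedsSoon: it retries fn with a
// short sleep until fn returns nil or the deadline passes, then fails the test.
func succeedsSoon(t testing.TB, fn func() error) {
	t.Helper()
	deadline := time.Now().Add(45 * time.Second)
	for {
		err := fn()
		if err == nil {
			return
		}
		if time.Now().After(deadline) {
			t.Fatalf("condition failed to hold in time: %v", err)
		}
		time.Sleep(10 * time.Millisecond)
	}
}

// TestEventuallyReady shows the typical caller shape: poll asynchronously
// updated state and return a descriptive error until it reaches the expected value.
func TestEventuallyReady(t *testing.T) {
	ready := make(chan struct{})
	go func() {
		time.Sleep(50 * time.Millisecond) // simulate asynchronous setup
		close(ready)
	}()
	succeedsSoon(t, func() error {
		select {
		case <-ready:
			return nil
		default:
			return errors.New("not ready yet")
		}
	})
}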
Example 1: TestRaftTransportCircuitBreaker
// TestRaftTransportCircuitBreaker verifies that messages will be
// dropped waiting for raft node connection to be established.
func TestRaftTransportCircuitBreaker(t *testing.T) {
	defer leaktest.AfterTest(t)()
	rttc := newRaftTransportTestContext(t)
	defer rttc.Stop()
	serverReplica := roachpb.ReplicaDescriptor{
		NodeID:    2,
		StoreID:   2,
		ReplicaID: 2,
	}
	_, serverAddr := rttc.AddNodeWithoutGossip(serverReplica.NodeID, util.TestAddr, rttc.stopper)
	serverChannel := rttc.ListenStore(serverReplica.NodeID, serverReplica.StoreID)
	clientReplica := roachpb.ReplicaDescriptor{
		NodeID:    1,
		StoreID:   1,
		ReplicaID: 1,
	}
	clientTransport := rttc.AddNode(clientReplica.NodeID)
	// The transport is set up asynchronously, so we expect the first
	// Send to return true here.
	if !rttc.Send(clientReplica, serverReplica, 1, raftpb.Message{Commit: 1}) {
		t.Errorf("unexpectedly failed sending while connection is being asynchronously established")
	}
	// However, sending repeated messages should begin dropping once
	// the circuit breaker does trip.
	testutils.SucceedsSoon(t, func() error {
		if rttc.Send(clientReplica, serverReplica, 1, raftpb.Message{Commit: 1}) {
			return errors.Errorf("expected circuit breaker to trip")
		}
		return nil
	})
	// Now, gossip address of server.
	rttc.GossipNode(serverReplica.NodeID, serverAddr)
	// Keep sending commit=2 until breaker resets and we receive the
	// first instance. It's possible an earlier message for commit=1
	// snuck in.
	testutils.SucceedsSoon(t, func() error {
		if !rttc.Send(clientReplica, serverReplica, 1, raftpb.Message{Commit: 2}) {
			clientTransport.GetCircuitBreaker(serverReplica.NodeID).Reset()
		}
		select {
		case req := <-serverChannel.ch:
			if req.Message.Commit == 2 {
				return nil
			}
		default:
		}
		return errors.Errorf("expected message commit=2")
	})
}
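One detail worth calling out in the last closure above: a retry closure may perform a corrective side effect (here, resetting the circuit breaker) and then check a channel without blocking before reporting an error, so the helper keeps retrying until the wanted message arrives. A stripped-down sketch of that shape, reusing the hypothetical succeedsSoon helper from the sketch near the top (names here are made up; assumes "fmt" and "testing"):

// waitForCommit polls a message channel without blocking until a message with
// the wanted commit index arrives; kick stands in for a corrective side effect
// (such as resetting a breaker) taken on each failed attempt.
func waitForCommit(t testing.TB, ch <-chan uint64, want uint64, kick func()) {
	succeedsSoon(t, func() error {
		kick()
		select {
		case got := <-ch:
			if got == want {
				return nil
			}
			return fmt.Errorf("saw stale commit %d, want %d", got, want)
		default:
			return fmt.Errorf("no message yet; want commit %d", want)
		}
	})
}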
Example 2: TestOffsetMeasurement
func TestOffsetMeasurement(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	serverTime := time.Unix(0, 20)
	serverClock := hlc.NewClock(serverTime.UnixNano, time.Nanosecond)
	serverCtx := newNodeTestContext(serverClock, stopper)
	s, ln := newTestServer(t, serverCtx, true)
	remoteAddr := ln.Addr().String()
	RegisterHeartbeatServer(s, &HeartbeatService{
		clock:              serverClock,
		remoteClockMonitor: serverCtx.RemoteClocks,
	})
	// Create a client clock that is behind the server clock.
	clientAdvancing := AdvancingClock{time: time.Unix(0, 10)}
	clientClock := hlc.NewClock(clientAdvancing.UnixNano, time.Nanosecond)
	clientCtx := newNodeTestContext(clientClock, stopper)
	clientCtx.RemoteClocks.offsetTTL = 5 * clientAdvancing.getAdvancementInterval()
	if _, err := clientCtx.GRPCDial(remoteAddr); err != nil {
		t.Fatal(err)
	}
	expectedOffset := RemoteOffset{Offset: 10, Uncertainty: 0, MeasuredAt: 10}
	testutils.SucceedsSoon(t, func() error {
		clientCtx.RemoteClocks.mu.Lock()
		defer clientCtx.RemoteClocks.mu.Unlock()
		if o, ok := clientCtx.RemoteClocks.mu.offsets[remoteAddr]; !ok {
			return errors.Errorf("expected offset of %s to be initialized, but it was not", remoteAddr)
		} else if o != expectedOffset {
			return errors.Errorf("expected:\n%v\nactual:\n%v", expectedOffset, o)
		}
		return nil
	})
	// Change the client such that it receives a heartbeat right after the
	// maximum clock reading delay.
	clientAdvancing.setAdvancementInterval(
		maximumPingDurationMult*clientClock.MaxOffset() + 1*time.Nanosecond)
	testutils.SucceedsSoon(t, func() error {
		clientCtx.RemoteClocks.mu.Lock()
		defer clientCtx.RemoteClocks.mu.Unlock()
		if o, ok := clientCtx.RemoteClocks.mu.offsets[remoteAddr]; ok {
			return errors.Errorf("expected offset to have been cleared, but found %s", o)
		}
		return nil
	})
}
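This test shows a common two-phase use of SucceedsSoon: one call waits for an entry to appear in a mutex-guarded map, a second waits for it to be cleared again, and the lock is taken inside the closure on every attempt. A generic sketch of the same shape, with hypothetical types and the succeedsSoon helper from the first sketch (assumes "fmt", "sync", and "testing"):

type registry struct {
	mu      sync.Mutex
	offsets map[string]int64
}

// waitForEntry polls the registry under its lock until key is present.
func waitForEntry(t testing.TB, r *registry, key string) {
	succeedsSoon(t, func() error {
		r.mu.Lock()
		defer r.mu.Unlock()
		if _, ok := r.offsets[key]; !ok {
			return fmt.Errorf("offset for %s not initialized yet", key)
		}
		return nil
	})
}

// waitForCleared polls until the entry has been removed again.
func waitForCleared(t testing.TB, r *registry, key string) {
	succeedsSoon(t, func() error {
		r.mu.Lock()
		defer r.mu.Unlock()
		if o, ok := r.offsets[key]; ok {
			return fmt.Errorf("offset %d for %s not cleared yet", o, key)
		}
		return nil
	})
}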
Example 3: TestRangeSplitsWithWritePressure
// TestRangeSplitsWithWritePressure sets the zone config max bytes for
// a range to 256K and writes data until there are five ranges.
func TestRangeSplitsWithWritePressure(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Override default zone config.
	cfg := config.DefaultZoneConfig()
	cfg.RangeMaxBytes = 1 << 18
	defer config.TestingSetDefaultZoneConfig(cfg)()
	dbCtx := client.DefaultDBContext()
	dbCtx.TxnRetryOptions = retry.Options{
		InitialBackoff: 1 * time.Millisecond,
		MaxBackoff:     10 * time.Millisecond,
		Multiplier:     2,
	}
	s, _ := createTestDBWithContext(t, dbCtx)
	// This is purely to silence log spam.
	config.TestingSetupZoneConfigHook(s.Stopper)
	defer s.Stop()
	// Start a test writer that writes about 32K per key, so not too many
	// writes are necessary to split the range.
	done := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go startTestWriter(s.DB, int64(0), 1<<15, &wg, nil, nil, done, t)
	// Check that we split 5 times in the allotted time.
	testutils.SucceedsSoon(t, func() error {
		// Scan the txn records.
		rows, err := s.DB.Scan(context.TODO(), keys.Meta2Prefix, keys.MetaMax, 0)
		if err != nil {
			return errors.Errorf("failed to scan meta2 keys: %s", err)
		}
		if lr := len(rows); lr < 5 {
			return errors.Errorf("expected >= 5 scans; got %d", lr)
		}
		return nil
	})
	close(done)
	wg.Wait()
	// This write pressure test often causes splits while resolve
	// intents are in flight, causing them to fail with range key
	// mismatch errors. However, LocalSender should retry in these
	// cases. Check here via MVCC scan that there are no dangling write
	// intents. We do this using a SucceedsSoon construct to account
	// for timing of finishing the test writer and a possibly-ongoing
	// asynchronous split.
	testutils.SucceedsSoon(t, func() error {
		if _, _, _, err := engine.MVCCScan(context.Background(), s.Eng, keys.LocalMax, roachpb.KeyMax, math.MaxInt64, hlc.MaxTimestamp, true, nil); err != nil {
			return errors.Errorf("failed to verify no dangling intents: %s", err)
		}
		return nil
	})
}
Example 4: TestScannerDisabled
// TestScannerDisabled verifies that disabling a scanner prevents
// replicas from being added to queues.
func TestScannerDisabled(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const count = 3
	ranges := newTestRangeSet(count, t)
	q := &testQueue{}
	s := newReplicaScanner(log.AmbientContext{}, 1*time.Millisecond, 0, ranges)
	s.AddQueues(q)
	mc := hlc.NewManualClock(123)
	clock := hlc.NewClock(mc.UnixNano, time.Nanosecond)
	stopper := stop.NewStopper()
	s.Start(clock, stopper)
	defer stopper.Stop()
	// Verify queue gets all ranges.
	testutils.SucceedsSoon(t, func() error {
		if q.count() != count {
			return errors.Errorf("expected %d replicas; have %d", count, q.count())
		}
		if s.scanCount() == 0 {
			return errors.Errorf("expected scanner count to increment")
		}
		return nil
	})
	lastWaitEnabledCount := s.waitEnabledCount()
	// Now, disable the scanner.
	s.SetDisabled(true)
	testutils.SucceedsSoon(t, func() error {
		if s.waitEnabledCount() == lastWaitEnabledCount {
			return errors.Errorf("expected scanner to stop when disabled")
		}
		return nil
	})
	lastScannerCount := s.scanCount()
	// Remove the replicas and verify the scanner still removes them while disabled.
	ranges.Visit(func(repl *Replica) bool {
		s.RemoveReplica(repl)
		return true
	})
	testutils.SucceedsSoon(t, func() error {
		if qc := q.count(); qc != 0 {
			return errors.Errorf("expected queue to be empty after replicas removed from scanner; got %d", qc)
		}
		return nil
	})
	if sc := s.scanCount(); sc != lastScannerCount {
		t.Errorf("expected scanner count to not increment: %d != %d", sc, lastScannerCount)
	}
}
Example 5: TestReplicaGCQueueDropReplicaDirect
// TestReplicaGCQueueDropReplicaDirect verifies that a removed replica is
// immediately cleaned up.
func TestReplicaGCQueueDropReplicaDirect(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := &multiTestContext{}
	const numStores = 3
	rangeID := roachpb.RangeID(1)
	// In this test, the Replica on the second Node is removed, and the test
	// verifies that Node 2 adds this Replica to its RangeGCQueue. However,
	// the queue does a consistent lookup which will usually be read from
	// Node 1. Hence, if Node 1 hasn't processed the removal when Node 2 has,
	// no GC will take place since the consistent RangeLookup hits the first
	// Node. We use the TestingCommandFilter to make sure that the second Node
	// waits for the first.
	cfg := storage.TestStoreConfig(nil)
	mtc.storeConfig = &cfg
	mtc.storeConfig.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			et, ok := filterArgs.Req.(*roachpb.EndTransactionRequest)
			if !ok || filterArgs.Sid != 2 {
				return nil
			}
			crt := et.InternalCommitTrigger.GetChangeReplicasTrigger()
			if crt == nil || crt.ChangeType != roachpb.REMOVE_REPLICA {
				return nil
			}
			testutils.SucceedsSoon(t, func() error {
				r, err := mtc.stores[0].GetReplica(rangeID)
				if err != nil {
					return err
				}
				if _, ok := r.Desc().GetReplicaDescriptor(2); ok {
					return errors.New("expected second node gone from first node's known replicas")
				}
				return nil
			})
			return nil
		}
	defer mtc.Stop()
	mtc.Start(t, numStores)
	mtc.replicateRange(rangeID, 1, 2)
	mtc.unreplicateRange(rangeID, 1)
	// Make sure the range is removed from the store.
	testutils.SucceedsSoon(t, func() error {
		if _, err := mtc.stores[1].GetReplica(rangeID); !testutils.IsError(err, "range .* was not found") {
			return errors.Errorf("expected range removal: %v", err) // NB: errors.Wrapf(nil, ...) returns nil.
		}
		return nil
	})
}
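The final closure in this example retries until a lookup fails with a specific error; testutils.IsError is effectively a regular-expression match against the error text. The sketch below shows the same idea using only the standard library, again with the hypothetical succeedsSoon helper (names made up; assumes "errors", "fmt", "regexp", and "testing"):

// waitForNotFound retries lookup until it fails with an error matching re.
func waitForNotFound(t testing.TB, lookup func() error, re string) {
	succeedsSoon(t, func() error {
		err := lookup()
		if err == nil {
			return errors.New("lookup still succeeding; expected it to fail")
		}
		if ok, _ := regexp.MatchString(re, err.Error()); !ok {
			return fmt.Errorf("unexpected error %q, want match for %q", err, re)
		}
		return nil
	})
}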
Example 6: TestFailedOffsetMeasurement
func TestFailedOffsetMeasurement(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	// Can't be zero because that'd be an empty offset.
	clock := hlc.NewClock(time.Unix(0, 1).UnixNano, time.Nanosecond)
	serverCtx := newNodeTestContext(clock, stopper)
	s, ln := newTestServer(t, serverCtx, true)
	remoteAddr := ln.Addr().String()
	heartbeat := &ManualHeartbeatService{
		clock:              clock,
		remoteClockMonitor: serverCtx.RemoteClocks,
		ready:              make(chan struct{}),
		stopper:            stopper,
	}
	RegisterHeartbeatServer(s, heartbeat)
	// Create a client that never receives a heartbeat after the first.
	clientCtx := newNodeTestContext(clock, stopper)
	// Increase the timeout so that failure arises from exceeding the maximum
	// clock reading delay, not the timeout.
	clientCtx.HeartbeatTimeout = 20 * clientCtx.HeartbeatInterval
	if _, err := clientCtx.GRPCDial(remoteAddr); err != nil {
		t.Fatal(err)
	}
	heartbeat.ready <- struct{}{} // Allow one heartbeat for initialization.
	testutils.SucceedsSoon(t, func() error {
		clientCtx.RemoteClocks.mu.Lock()
		defer clientCtx.RemoteClocks.mu.Unlock()
		if _, ok := clientCtx.RemoteClocks.mu.offsets[remoteAddr]; !ok {
			return errors.Errorf("expected offset of %s to be initialized, but it was not", remoteAddr)
		}
		return nil
	})
	testutils.SucceedsSoon(t, func() error {
		serverCtx.RemoteClocks.mu.Lock()
		defer serverCtx.RemoteClocks.mu.Unlock()
		if o, ok := serverCtx.RemoteClocks.mu.offsets[remoteAddr]; ok {
			return errors.Errorf("expected offset of %s to not be initialized, but it was: %v", remoteAddr, o)
		}
		return nil
	})
}
Example 7: TestComputeStatsForKeySpan
func TestComputeStatsForKeySpan(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := &multiTestContext{}
	defer mtc.Stop()
	mtc.Start(t, 3)
	// Create a number of ranges using splits.
	splitKeys := []string{"a", "c", "e", "g", "i"}
	for _, k := range splitKeys {
		key := []byte(k)
		repl := mtc.stores[0].LookupReplica(key, roachpb.RKeyMin)
		args := adminSplitArgs(key, key)
		header := roachpb.Header{
			RangeID: repl.RangeID,
		}
		if _, err := client.SendWrappedWith(context.Background(), mtc.stores[0], header, args); err != nil {
			t.Fatal(err)
		}
	}
	// Wait for splits to finish.
	testutils.SucceedsSoon(t, func() error {
		repl := mtc.stores[0].LookupReplica(roachpb.RKey("z"), nil)
		if actualRSpan := repl.Desc().RSpan(); !actualRSpan.Key.Equal(roachpb.RKey("i")) {
			return errors.Errorf("expected range %s to begin at key 'i'", repl)
		}
		return nil
	})
	// Create some keys across the ranges.
	incKeys := []string{"b", "bb", "bbb", "d", "dd", "h"}
	for _, k := range incKeys {
		if _, err := mtc.dbs[0].Inc(context.TODO(), []byte(k), 5); err != nil {
			t.Fatal(err)
		}
	}
	// Verify stats across different spans.
	for _, tcase := range []struct {
		startKey       string
		endKey         string
		expectedRanges int
		expectedKeys   int64
	}{
		{"a", "i", 4, 6},
		{"a", "c", 1, 3},
		{"b", "e", 2, 5},
		{"e", "i", 2, 1},
	} {
		start, end := tcase.startKey, tcase.endKey
		stats, count := mtc.stores[0].ComputeStatsForKeySpan(
			roachpb.RKey(start), roachpb.RKey(end))
		if a, e := count, tcase.expectedRanges; a != e {
			t.Errorf("Expected %d ranges in span [%s - %s], found %d", e, start, end, a)
		}
		if a, e := stats.LiveCount, tcase.expectedKeys; a != e {
			t.Errorf("Expected %d keys in span [%s - %s], found %d", e, start, end, a)
		}
	}
}
Example 8: TestHealthAPI
func TestHealthAPI(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	// We need to retry because the node ID isn't set until after
	// bootstrapping.
	testutils.SucceedsSoon(t, func() error {
		var resp serverpb.HealthResponse
		return getAdminJSONProto(s, "health", &resp)
	})
	// Expire this node's liveness record by pausing heartbeats and advancing the
	// server's clock.
	ts := s.(*TestServer)
	ts.nodeLiveness.PauseHeartbeat(true)
	self, err := ts.nodeLiveness.Self()
	if err != nil {
		t.Fatal(err)
	}
	s.Clock().Update(self.Expiration.Add(1, 0))
	expected := "node is not live"
	var resp serverpb.HealthResponse
	if err := getAdminJSONProto(s, "health", &resp); !testutils.IsError(err, expected) {
		t.Errorf("expected %q error, got %v", expected, err)
	}
}
Example 9: TestSchedulerBuffering
// Verify that when we enqueue the same range multiple times for the same
// reason, it is only processed once.
func TestSchedulerBuffering(t *testing.T) {
	defer leaktest.AfterTest(t)()
	p := newTestProcessor()
	s := newRaftScheduler(log.AmbientContext{}, nil, p, 1)
	stopper := stop.NewStopper()
	defer stopper.Stop()
	s.Start(stopper)
	testCases := []struct {
		state    raftScheduleState
		expected string
	}{
		{stateRaftReady, "ready=[1:1] request=[] tick=[]"},
		{stateRaftRequest, "ready=[1:1] request=[1:1] tick=[]"},
		{stateRaftTick, "ready=[1:1] request=[1:1] tick=[1:1]"},
		{stateRaftReady | stateRaftRequest | stateRaftTick, "ready=[1:2] request=[1:2] tick=[1:2]"},
	}
	for _, c := range testCases {
		s.signal(s.enqueueN(c.state, 1, 1, 1, 1, 1))
		testutils.SucceedsSoon(t, func() error {
			if s := p.String(); c.expected != s {
				return errors.Errorf("expected %s, but got %s", c.expected, s)
			}
			return nil
		})
	}
}
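Each table-driven case above is only considered done once the processor's String() output matches the expected value, with SucceedsSoon absorbing the scheduler's asynchronous delay. A generic version of that per-case polling, built on the hypothetical succeedsSoon helper (assumes "fmt" and "testing"):

// waitForState polls s.String() until it equals want, returning a descriptive
// error on each mismatched attempt so the retry helper keeps going.
func waitForState(t testing.TB, s fmt.Stringer, want string) {
	succeedsSoon(t, func() error {
		if got := s.String(); got != want {
			return fmt.Errorf("expected %q, but got %q", want, got)
		}
		return nil
	})
}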
Example 10: TestTxnCoordSenderGCTimeout
// TestTxnCoordSenderGCTimeout verifies that the coordinator cleans up extant
// transactions and intents after the lastUpdateNanos exceeds the timeout.
func TestTxnCoordSenderGCTimeout(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sender := createTestDB(t)
	defer s.Stop()
	// Set heartbeat interval to 1ms for testing.
	sender.heartbeatInterval = 1 * time.Millisecond
	txn := client.NewTxn(context.Background(), *s.DB)
	key := roachpb.Key("a")
	if err := txn.Put(key, []byte("value")); err != nil {
		t.Fatal(err)
	}
	// Now, advance clock past the default client timeout.
	// Locking the TxnCoordSender to prevent a data race.
	sender.txnMu.Lock()
	s.Manual.Increment(defaultClientTimeout.Nanoseconds() + 1)
	sender.txnMu.Unlock()
	txnID := *txn.Proto.ID
	testutils.SucceedsSoon(t, func() error {
		// Locking the TxnCoordSender to prevent a data race.
		sender.txnMu.Lock()
		_, ok := sender.txnMu.txns[txnID]
		sender.txnMu.Unlock()
		if ok {
			return errors.Errorf("expected garbage collection")
		}
		return nil
	})
	verifyCleanup(key, sender, s.Eng, t)
}
Example 11: startBankTransfers
func startBankTransfers(t testing.TB, stopper *stop.Stopper, sqlDB *gosql.DB, numAccounts int) {
	const maxTransfer = 999
	for {
		select {
		case <-stopper.ShouldQuiesce():
			return // All done.
		default:
			// Keep going.
		}
		from := rand.Intn(numAccounts)
		to := rand.Intn(numAccounts - 1)
		if from == to {
			to = numAccounts - 1
		}
		amount := rand.Intn(maxTransfer)
		const update = `UPDATE bench.bank
			SET balance = CASE id WHEN $1 THEN balance-$3 WHEN $2 THEN balance+$3 END
			WHERE id IN ($1, $2)`
		testutils.SucceedsSoon(t, func() error {
			select {
			case <-stopper.ShouldQuiesce():
				return nil // All done.
			default:
				// Keep going.
			}
			_, err := sqlDB.Exec(update, from, to, amount)
			return err
		})
	}
}
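Unlike the assertion-style closures in most examples, this writer simply returns the Exec error, so SucceedsSoon keeps retrying transient failures (for example, retryable transaction errors) and stops as soon as one execution succeeds; the quiesce check lets it exit cleanly at shutdown. A condensed sketch of that shape with hypothetical names (assumes "database/sql" and "testing", plus the succeedsSoon helper above):

// execSoon retries a single statement until it succeeds or done is closed.
func execSoon(t testing.TB, db *sql.DB, done <-chan struct{}, query string, args ...interface{}) {
	succeedsSoon(t, func() error {
		select {
		case <-done:
			return nil // shutting down; report success so the caller can return
		default:
		}
		_, err := db.Exec(query, args...)
		return err // nil ends the retry loop; any error triggers another attempt
	})
}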
Example 12: TestEagerReplication
func TestEagerReplication(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	store, _ := createTestStore(t, stopper)
	// Disable the replica scanner so that we rely on the eager replication code
	// path that occurs after splits.
	store.SetReplicaScannerActive(false)
	if err := server.WaitForInitialSplits(store.DB()); err != nil {
		t.Fatal(err)
	}
	// WaitForInitialSplits will return as soon as the meta2 span contains the
	// expected number of descriptors. But the addition of replicas to the
	// replicateQueue after a split occurs happens after the update of the
	// descriptors in meta2 leaving a tiny window of time in which the newly
	// split replica will not have been added to purgatory. Thus we loop.
	testutils.SucceedsSoon(t, func() error {
		// After the initial splits have been performed, all of the resulting ranges
		// should be present in replicate queue purgatory (because we only have a
		// single store in the test and thus replication cannot succeed).
		expected := server.ExpectedInitialRangeCount()
		if n := store.ReplicateQueuePurgatoryLength(); expected != n {
			return errors.Errorf("expected %d replicas in purgatory, but found %d", expected, n)
		}
		return nil
	})
}
Example 13: testBuildInfoInner
func testBuildInfoInner(
	ctx context.Context, t *testing.T, c cluster.Cluster, cfg cluster.TestConfig,
) {
	CheckGossip(ctx, t, c, 20*time.Second, HasPeers(c.NumNodes()))
	var details serverpb.DetailsResponse
	testutils.SucceedsSoon(t, func() error {
		select {
		case <-stopper.ShouldStop():
			t.Fatalf("interrupted")
		default:
		}
		return httputil.GetJSON(cluster.HTTPClient, c.URL(ctx, 0)+"/_status/details/local", &details)
	})
	bi := details.BuildInfo
	testData := map[string]string{
		"go_version":   bi.GoVersion,
		"tag":          bi.Tag,
		"time":         bi.Time,
		"dependencies": bi.Dependencies,
	}
	for key, val := range testData {
		if val == "" {
			t.Errorf("build info not set for \"%s\"", key)
		}
	}
}
Example 14: TestClientDisallowMultipleConns
// TestClientDisallowMultipleConns verifies that the server disallows
// multiple connections from the same client node ID.
func TestClientDisallowMultipleConns(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	local := startGossip(1, stopper, t, metric.NewRegistry())
	remote := startGossip(2, stopper, t, metric.NewRegistry())
	local.mu.Lock()
	remote.mu.Lock()
	rAddr := remote.mu.is.NodeAddr
	// Start two clients from local to remote. RPC client cache is
	// disabled via the context, so we'll start two different outgoing
	// connections.
	local.startClient(&rAddr)
	local.startClient(&rAddr)
	local.mu.Unlock()
	remote.mu.Unlock()
	local.manage()
	remote.manage()
	testutils.SucceedsSoon(t, func() error {
		// Verify that the remote server has only a single incoming
		// connection and the local server has only a single outgoing
		// connection.
		local.mu.Lock()
		remote.mu.Lock()
		outgoing := local.outgoing.len()
		incoming := remote.mu.incoming.len()
		local.mu.Unlock()
		remote.mu.Unlock()
		if outgoing == 1 && incoming == 1 && verifyServerMaps(local, 0) && verifyServerMaps(remote, 1) {
			return nil
		}
		return errors.Errorf("incorrect number of incoming (%d) or outgoing (%d) connections", incoming, outgoing)
	})
}
Example 15: TestClientDisconnectRedundant
// TestClientDisconnectRedundant verifies that the gossip server
// will drop an outgoing client connection that is already an
// inbound client connection of another node.
func TestClientDisconnectRedundant(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	local := startGossip(1, stopper, t, metric.NewRegistry())
	remote := startGossip(2, stopper, t, metric.NewRegistry())
	// startClient requires locks are held, so acquire here.
	local.mu.Lock()
	remote.mu.Lock()
	rAddr := remote.mu.is.NodeAddr
	lAddr := local.mu.is.NodeAddr
	local.startClient(&rAddr)
	remote.startClient(&lAddr)
	local.mu.Unlock()
	remote.mu.Unlock()
	local.manage()
	remote.manage()
	testutils.SucceedsSoon(t, func() error {
		// Check which of the clients is connected to the other.
		ok1 := local.findClient(func(c *client) bool { return c.addr.String() == rAddr.String() }) != nil
		ok2 := remote.findClient(func(c *client) bool { return c.addr.String() == lAddr.String() }) != nil
		// We expect node 2 to disconnect; if both are still connected,
		// it's possible that node 1 gossiped before node 2 connected, in
		// which case we have to gossip from node 1 to trigger the
		// disconnect redundant client code.
		if ok1 && ok2 {
			if err := local.AddInfo("local-key", nil, time.Second); err != nil {
				t.Fatal(err)
			}
		} else if ok1 && !ok2 && verifyServerMaps(local, 0) && verifyServerMaps(remote, 1) {
			return nil
		}
		return errors.New("local client to remote not yet closed as redundant")
	})
}