This article collects typical usage examples of the Golang function util.SucceedsWithin from github.com/cockroachdb/cockroach/util. If you have been wondering what exactly SucceedsWithin does, how to call it, or where to find real examples of its use, the curated code samples below may help.
Fifteen code examples of the SucceedsWithin function are shown below, sorted by popularity by default.
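Before the examples, here is a minimal sketch of the call pattern. It assumes the historical import path github.com/cockroachdb/cockroach/util used by these examples and a SucceedsWithin(t, duration, fn) signature that retries fn until it returns nil and fails the test if the duration elapses first (later revisions of the repository replaced this helper with SucceedsSoon). The test name and the polled condition are purely illustrative and not taken from the CockroachDB code.

package example

import (
    "errors"
    "testing"
    "time"

    "github.com/cockroachdb/cockroach/util"
)

func TestEventuallyReady(t *testing.T) {
    // Simulate some asynchronous work completing at an unknown time.
    ready := make(chan struct{}, 1)
    go func() { ready <- struct{}{} }()

    // Poll until the condition holds or one second has passed. On timeout,
    // SucceedsWithin fails the test itself, so no extra check is needed.
    util.SucceedsWithin(t, time.Second, func() error {
        select {
        case <-ready:
            return nil
        default:
            return errors.New("not ready yet")
        }
    })
}

The examples that follow apply the same pattern to real replication, splitting, gossip, and metrics tests.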
Example 1: TestReplicateRange
// TestReplicateRange verifies basic replication functionality by creating two stores
// and a range, replicating the range to the second store, and reading its data there.
func TestReplicateRange(t *testing.T) {
    defer leaktest.AfterTest(t)
    mtc := multiTestContext{}
    mtc.Start(t, 2)
    defer mtc.Stop()
    // Issue a command on the first node before replicating.
    incArgs, incResp := incrementArgs([]byte("a"), 5, 1, mtc.stores[0].StoreID())
    if err := mtc.stores[0].ExecuteCmd(context.Background(), proto.Call{Args: incArgs, Reply: incResp}); err != nil {
        t.Fatal(err)
    }
    rng, err := mtc.stores[0].GetRange(1)
    if err != nil {
        t.Fatal(err)
    }
    if err := rng.ChangeReplicas(proto.ADD_REPLICA,
        proto.Replica{
            NodeID:  mtc.stores[1].Ident.NodeID,
            StoreID: mtc.stores[1].Ident.StoreID,
        }); err != nil {
        t.Fatal(err)
    }
    // Verify no intent remains on range descriptor key.
    key := keys.RangeDescriptorKey(rng.Desc().StartKey)
    desc := proto.RangeDescriptor{}
    if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &desc); !ok || err != nil {
        t.Fatalf("fetching range descriptor yielded %t, %s", ok, err)
    }
    // Verify that in time, no intents remain on meta addressing
    // keys, and that range descriptor on the meta records is correct.
    util.SucceedsWithin(t, 1*time.Second, func() error {
        meta2 := keys.RangeMetaKey(proto.KeyMax)
        meta1 := keys.RangeMetaKey(meta2)
        for _, key := range []proto.Key{meta2, meta1} {
            metaDesc := proto.RangeDescriptor{}
            if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &metaDesc); !ok || err != nil {
                return util.Errorf("failed to resolve %s", key)
            }
            if !reflect.DeepEqual(metaDesc, desc) {
                return util.Errorf("descs not equal: %+v != %+v", metaDesc, desc)
            }
        }
        return nil
    })
    // Verify that the same data is available on the replica.
    util.SucceedsWithin(t, 1*time.Second, func() error {
        getArgs, getResp := getArgs([]byte("a"), 1, mtc.stores[1].StoreID())
        getArgs.ReadConsistency = proto.INCONSISTENT
        if err := mtc.stores[1].ExecuteCmd(context.Background(), proto.Call{Args: getArgs, Reply: getResp}); err != nil {
            return util.Errorf("failed to read data")
        }
        if v := mustGetInteger(getResp.Value); v != 5 {
            return util.Errorf("failed to read correct data: %d", v)
        }
        return nil
    })
}
Example 2: TestReplicateRange
// TestReplicateRange verifies basic replication functionality by creating two stores
// and a range, replicating the range to the second store, and reading its data there.
func TestReplicateRange(t *testing.T) {
    defer leaktest.AfterTest(t)
    mtc := startMultiTestContext(t, 2)
    defer mtc.Stop()
    // Issue a command on the first node before replicating.
    incArgs := incrementArgs([]byte("a"), 5)
    if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
        t.Fatal(err)
    }
    rng, err := mtc.stores[0].GetReplica(1)
    if err != nil {
        t.Fatal(err)
    }
    if err := rng.ChangeReplicas(roachpb.ADD_REPLICA,
        roachpb.ReplicaDescriptor{
            NodeID:  mtc.stores[1].Ident.NodeID,
            StoreID: mtc.stores[1].Ident.StoreID,
        }, rng.Desc()); err != nil {
        t.Fatal(err)
    }
    // Verify no intent remains on range descriptor key.
    key := keys.RangeDescriptorKey(rng.Desc().StartKey)
    desc := roachpb.RangeDescriptor{}
    if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &desc); !ok || err != nil {
        t.Fatalf("fetching range descriptor yielded %t, %s", ok, err)
    }
    // Verify that in time, no intents remain on meta addressing
    // keys, and that range descriptor on the meta records is correct.
    util.SucceedsWithin(t, 1*time.Second, func() error {
        meta2 := keys.Addr(keys.RangeMetaKey(roachpb.RKeyMax))
        meta1 := keys.Addr(keys.RangeMetaKey(meta2))
        for _, key := range []roachpb.RKey{meta2, meta1} {
            metaDesc := roachpb.RangeDescriptor{}
            if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key.AsRawKey(), mtc.stores[0].Clock().Now(), true, nil, &metaDesc); !ok || err != nil {
                return util.Errorf("failed to resolve %s", key.AsRawKey())
            }
            if !reflect.DeepEqual(metaDesc, desc) {
                return util.Errorf("descs not equal: %+v != %+v", metaDesc, desc)
            }
        }
        return nil
    })
    // Verify that the same data is available on the replica.
    util.SucceedsWithin(t, replicaReadTimeout, func() error {
        getArgs := getArgs([]byte("a"))
        if reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
            ReadConsistency: roachpb.INCONSISTENT,
        }, &getArgs); err != nil {
            return util.Errorf("failed to read data: %s", err)
        } else if e, v := int64(5), mustGetInt(reply.(*roachpb.GetResponse).Value); v != e {
            return util.Errorf("failed to read correct data: expected %d, got %d", e, v)
        }
        return nil
    })
}
Example 3: TestStoreZoneUpdateAndRangeSplit
// TestStoreZoneUpdateAndRangeSplit verifies that modifying the zone
// configuration changes range max bytes and Range.maybeSplit() takes
// max bytes into account when deciding whether to enqueue a range for
// splitting. It further verifies that the range is in fact split on
// exceeding zone's RangeMaxBytes.
func TestStoreZoneUpdateAndRangeSplit(t *testing.T) {
    defer leaktest.AfterTest(t)
    store, stopper := createTestStore(t)
    config.TestingSetupZoneConfigHook(stopper)
    defer stopper.Stop()
    maxBytes := int64(1 << 16)
    // Set max bytes.
    descID := uint32(keys.MaxReservedDescID + 1)
    config.TestingSetZoneConfig(descID, &config.ZoneConfig{RangeMaxBytes: maxBytes})
    // Trigger gossip callback.
    if err := store.Gossip().AddInfoProto(gossip.KeySystemConfig, &config.SystemConfig{}, 0); err != nil {
        t.Fatal(err)
    }
    // Wait for the range to be split along table boundaries.
    originalRange := store.LookupReplica(roachpb.RKey(keys.UserTableDataMin), nil)
    var rng *storage.Replica
    util.SucceedsWithin(t, splitTimeout, func() error {
        rng = store.LookupReplica(keys.MakeTablePrefix(descID), nil)
        if rng.RangeID == originalRange.RangeID {
            return util.Errorf("expected new range created by split")
        }
        return nil
    })
    // Check range's max bytes settings.
    if rng.GetMaxBytes() != maxBytes {
        t.Fatalf("range max bytes mismatch, got: %d, expected: %d", rng.GetMaxBytes(), maxBytes)
    }
    // Make sure the second range goes to the end.
    if !roachpb.RKeyMax.Equal(rng.Desc().EndKey) {
        t.Fatalf("second range has split: %+v", rng.Desc())
    }
    // Look in the range after prefix we're writing to.
    fillRange(store, rng.RangeID, keys.MakeTablePrefix(descID), maxBytes, t)
    // Verify that the range is in fact split (give it a few seconds for very
    // slow test machines).
    var newRng *storage.Replica
    util.SucceedsWithin(t, splitTimeout, func() error {
        newRng = store.LookupReplica(keys.MakeTablePrefix(descID+1), nil)
        if newRng.RangeID == rng.RangeID {
            return util.Errorf("range has not yet split")
        }
        return nil
    })
    // Make sure the new range goes to the end.
    if !roachpb.RKeyMax.Equal(newRng.Desc().EndKey) {
        t.Fatalf("new range has split: %+v", newRng.Desc())
    }
}
Example 4: TestStoreZoneUpdateAndRangeSplit
// TestStoreZoneUpdateAndRangeSplit verifies that modifying the zone
// configuration changes range max bytes and Range.maybeSplit() takes
// max bytes into account when deciding whether to enqueue a range for
// splitting. It further verifies that the range is in fact split on
// exceeding zone's RangeMaxBytes.
func TestStoreZoneUpdateAndRangeSplit(t *testing.T) {
    defer leaktest.AfterTest(t)
    store, stopper := createTestStore(t)
    config.TestingSetupZoneConfigHook(stopper)
    defer stopper.Stop()
    maxBytes := int64(1 << 16)
    // Set max bytes.
    descID := uint32(keys.MaxReservedDescID + 1)
    config.TestingSetZoneConfig(descID, &config.ZoneConfig{RangeMaxBytes: maxBytes})
    // Trigger gossip callback.
    if err := store.Gossip().AddInfoProto(gossip.KeySystemConfig, &config.SystemConfig{}, 0); err != nil {
        t.Fatal(err)
    }
    tableBoundary := keys.MakeTablePrefix(descID)
    {
        var rng *storage.Replica
        // Wait for the range to be split along table boundaries.
        expectedRSpan := roachpb.RSpan{Key: roachpb.RKey(tableBoundary), EndKey: roachpb.RKeyMax}
        util.SucceedsWithin(t, splitTimeout, func() error {
            rng = store.LookupReplica(tableBoundary, nil)
            if actualRSpan := rng.Desc().RSpan(); !actualRSpan.Equal(expectedRSpan) {
                return util.Errorf("expected range %s to span %s", rng, expectedRSpan)
            }
            return nil
        })
        // Check range's max bytes settings.
        if actualMaxBytes := rng.GetMaxBytes(); actualMaxBytes != maxBytes {
            t.Fatalf("range %s max bytes mismatch, got: %d, expected: %d", rng, actualMaxBytes, maxBytes)
        }
        // Look in the range after prefix we're writing to.
        fillRange(store, rng.RangeID, tableBoundary, maxBytes, t)
    }
    // Verify that the range is in fact split.
    util.SucceedsWithin(t, splitTimeout, func() error {
        rng := store.LookupReplica(keys.MakeTablePrefix(descID+1), nil)
        rngDesc := rng.Desc()
        rngStart, rngEnd := rngDesc.StartKey, rngDesc.EndKey
        if rngStart.Equal(tableBoundary) || !rngEnd.Equal(roachpb.RKeyMax) {
            return util.Errorf("range %s has not yet split", rng)
        }
        return nil
    })
}
Example 5: TestReplicaGCQueueDropReplicaDirect
// TestReplicaGCQueueDropReplicaDirect verifies that a removed replica is
// immediately cleaned up.
func TestReplicaGCQueueDropReplicaDirect(t *testing.T) {
    defer leaktest.AfterTest(t)
    mtc := &multiTestContext{}
    const numStores = 3
    rangeID := roachpb.RangeID(1)
    // In this test, the Replica on the second Node is removed, and the test
    // verifies that that Node adds this Replica to its RangeGCQueue. However,
    // the queue does a consistent lookup which will usually be read from
    // Node 1. Hence, if Node 1 hasn't processed the removal when Node 2 has,
    // no GC will take place since the consistent RangeLookup hits the first
    // Node. We use the TestingCommandFilter to make sure that the second Node
    // waits for the first.
    storage.TestingCommandFilter = func(id roachpb.StoreID, args roachpb.Request, _ roachpb.Header) error {
        et, ok := args.(*roachpb.EndTransactionRequest)
        if !ok || id != 2 {
            return nil
        }
        rct := et.InternalCommitTrigger.GetChangeReplicasTrigger()
        if rct == nil || rct.ChangeType != roachpb.REMOVE_REPLICA {
            return nil
        }
        util.SucceedsWithin(t, time.Second, func() error {
            r, err := mtc.stores[0].GetReplica(rangeID)
            if err != nil {
                return err
            }
            if i, _ := r.Desc().FindReplica(2); i >= 0 {
                return errors.New("expected second node gone from first node's known replicas")
            }
            return nil
        })
        return nil
    }
    defer func() { storage.TestingCommandFilter = nil }()
    mtc.Start(t, numStores)
    defer mtc.Stop()
    mtc.replicateRange(rangeID, 1, 2)
    mtc.unreplicateRange(rangeID, 1)
    // Make sure the range is removed from the store.
    util.SucceedsWithin(t, 10*time.Second, func() error {
        if _, err := mtc.stores[1].GetReplica(rangeID); !testutils.IsError(err, "range .* was not found") {
            return util.Errorf("expected range removal")
        }
        return nil
    })
}
Example 6: TestClientDisconnectRedundant
// TestClientDisconnectRedundant verifies that the gossip server
// will drop an outgoing client connection that is already an
// inbound client connection of another node.
func TestClientDisconnectRedundant(t *testing.T) {
    defer leaktest.AfterTest(t)
    local, remote, stopper := startGossip(t)
    defer stopper.Stop()
    // startClient requires locks are held, so acquire here.
    local.mu.Lock()
    remote.mu.Lock()
    rAddr := remote.is.NodeAddr
    lAddr := local.is.NodeAddr
    local.startClient(rAddr, stopper)
    remote.startClient(lAddr, stopper)
    local.mu.Unlock()
    remote.mu.Unlock()
    local.manage(stopper)
    remote.manage(stopper)
    util.SucceedsWithin(t, 10*time.Second, func() error {
        // Check which of the clients is connected to the other.
        ok1 := local.findClient(func(c *client) bool { return c.addr.String() == rAddr.String() }) != nil
        ok2 := remote.findClient(func(c *client) bool { return c.addr.String() == lAddr.String() }) != nil
        // We expect node 1 to disconnect; if both are still connected,
        // it's possible that node 1 gossiped before node 2 connected, in
        // which case we have to gossip from node 1 to trigger the
        // disconnect redundant client code.
        if ok1 && ok2 {
            if err := local.AddInfo("local-key", nil, time.Second); err != nil {
                t.Fatal(err)
            }
        } else if !ok1 && ok2 && verifyServerMaps(local, 1) && verifyServerMaps(remote, 0) {
            return nil
        }
        return errors.New("local client to remote not yet closed as redundant")
    })
}
Example 7: TestRemoveRangeWithoutGC
// TestRemoveRangeWithoutGC ensures that we do not panic when a
// replica has been removed but not yet GC'd (and therefore
// does not have an active raft group).
func TestRemoveRangeWithoutGC(t *testing.T) {
    defer leaktest.AfterTest(t)
    mtc := startMultiTestContext(t, 2)
    defer mtc.Stop()
    // Disable the GC queue and move the range from store 0 to 1.
    mtc.stores[0].DisableReplicaGCQueue(true)
    const rangeID roachpb.RangeID = 1
    mtc.replicateRange(rangeID, 1)
    mtc.unreplicateRange(rangeID, 0)
    // Wait for store 0 to process the removal.
    util.SucceedsWithin(t, time.Second, func() error {
        rep, err := mtc.stores[0].GetReplica(rangeID)
        if err != nil {
            return err
        }
        desc := rep.Desc()
        if len(desc.Replicas) != 1 {
            return util.Errorf("range has %d replicas", len(desc.Replicas))
        }
        return nil
    })
    // The replica's data is still on disk even though the Replica
    // object is removed.
    var desc roachpb.RangeDescriptor
    descKey := keys.RangeDescriptorKey(roachpb.RKeyMin)
    if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), descKey,
        mtc.stores[0].Clock().Now(), true, nil, &desc); err != nil {
        t.Fatal(err)
    } else if !ok {
        t.Fatal("expected range descriptor to be present")
    }
    // Stop and restart the store to reset the replica's raftGroup
    // pointer to nil. As long as the store has not been restarted it
    // can continue to use its last known replica ID.
    mtc.stopStore(0)
    mtc.restartStore(0)
    // Turn off the GC queue to ensure that the replica is deleted at
    // startup instead of by the scanner. This is not 100% guaranteed
    // since the scanner could have already run at this point, but it
    // should be enough to prevent us from accidentally relying on the
    // scanner.
    mtc.stores[0].DisableReplicaGCQueue(true)
    // The Replica object is not recreated.
    if _, err := mtc.stores[0].GetReplica(rangeID); err == nil {
        t.Fatalf("expected replica to be missing")
    }
    // And the data is no longer on disk.
    if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), descKey,
        mtc.stores[0].Clock().Now(), true, nil, &desc); err != nil {
        t.Fatal(err)
    } else if ok {
        t.Fatal("expected range descriptor to be absent")
    }
}
Example 8: checkGossip
// checkGossip fetches the gossip infoStore from each node and invokes the given
// function. The test passes if the function returns nil for every node,
// retrying for up to the given duration.
func checkGossip(t *testing.T, l *localcluster.Cluster, d time.Duration,
    f checkGossipFunc) {
    util.SucceedsWithin(t, d, func() error {
        select {
        case <-stopper:
            t.Fatalf("interrupted")
            return nil
        case e := <-l.Events:
            if log.V(1) {
                log.Infof("%+v", e)
            }
            return fmt.Errorf("event: %+v", e)
        case <-time.After(1 * time.Second):
        }
        for i, node := range l.Nodes {
            var m map[string]interface{}
            if err := node.GetJSON("", "/_status/gossip/local", &m); err != nil {
                return err
            }
            infos := m["infos"].(map[string]interface{})
            if err := f(infos); err != nil {
                return util.Errorf("node %d: %s", i, err)
            }
        }
        return nil
    })
}
Example 9: TestMetricsRecording
// TestMetricsRecording verifies that Node statistics are periodically recorded
// as time series data.
func TestMetricsRecording(t *testing.T) {
    defer leaktest.AfterTest(t)
    tsrv := &TestServer{}
    tsrv.Ctx = NewTestContext()
    tsrv.Ctx.MetricsFrequency = 5 * time.Millisecond
    if err := tsrv.Start(); err != nil {
        t.Fatal(err)
    }
    defer tsrv.Stop()
    checkTimeSeriesKey := func(now int64, keyName string) error {
        key := ts.MakeDataKey(keyName, "", ts.Resolution10s, now)
        data := &proto.InternalTimeSeriesData{}
        return tsrv.db.GetProto(key, data)
    }
    // Verify that metrics for the current timestamp are recorded. This should
    // be true very quickly.
    util.SucceedsWithin(t, time.Second, func() error {
        now := tsrv.Clock().PhysicalNow()
        if err := checkTimeSeriesKey(now, "cr.store.livebytes.1"); err != nil {
            return err
        }
        if err := checkTimeSeriesKey(now, "cr.node.sys.allocbytes.1"); err != nil {
            return err
        }
        return nil
    })
}
Example 10: TestBuildInfo
func TestBuildInfo(t *testing.T) {
    c := StartCluster(t)
    defer c.AssertAndStop(t)
    checkGossip(t, c, 20*time.Second, hasPeers(c.NumNodes()))
    util.SucceedsWithin(t, 10*time.Second, func() error {
        select {
        case <-stopper:
            t.Fatalf("interrupted")
            return nil
        default:
        }
        var r struct {
            BuildInfo map[string]string
        }
        if err := getJSON(c.URL(0), "/_status/details/local", &r); err != nil {
            return err
        }
        for _, key := range []string{"goVersion", "tag", "time", "dependencies"} {
            if val, ok := r.BuildInfo[key]; !ok {
                t.Errorf("build info missing for \"%s\"", key)
            } else if val == "" {
                t.Errorf("build info not set for \"%s\"", key)
            }
        }
        return nil
    })
}
Example 11: TestClientDisallowMultipleConns
// TestClientDisallowMultipleConns verifies that the server disallows
// multiple connections from the same client node ID.
func TestClientDisallowMultipleConns(t *testing.T) {
    defer leaktest.AfterTest(t)
    stopper := stop.NewStopper()
    defer stopper.Stop()
    local := startGossip(1, stopper, t)
    remote := startGossip(2, stopper, t)
    local.mu.Lock()
    remote.mu.Lock()
    rAddr := remote.is.NodeAddr
    // Start two clients from local to remote. RPC client cache is
    // disabled via the context, so we'll start two different outgoing
    // connections.
    local.startClient(rAddr, stopper)
    local.startClient(rAddr, stopper)
    local.mu.Unlock()
    remote.mu.Unlock()
    local.manage()
    remote.manage()
    util.SucceedsWithin(t, 10*time.Second, func() error {
        // Verify that the remote server has only a single incoming
        // connection and the local server has only a single outgoing
        // connection.
        local.mu.Lock()
        remote.mu.Lock()
        outgoing := local.outgoing.len()
        incoming := remote.incoming.len()
        local.mu.Unlock()
        remote.mu.Unlock()
        if outgoing == 1 && incoming == 1 && verifyServerMaps(local, 0) && verifyServerMaps(remote, 1) {
            return nil
        }
        return util.Errorf("incorrect number of incoming (%d) or outgoing (%d) connections", incoming, outgoing)
    })
}
Example 12: TestStoreRangeSplitWithMaxBytesUpdate
// TestStoreRangeSplitWithMaxBytesUpdate tests a scenario where a new
// zone config that updates the max bytes is set and triggers a range
// split.
func TestStoreRangeSplitWithMaxBytesUpdate(t *testing.T) {
    defer leaktest.AfterTest(t)
    store, stopper := createTestStore(t)
    config.TestingSetupZoneConfigHook(stopper)
    defer stopper.Stop()
    origRng := store.LookupReplica(roachpb.RKeyMin, nil)
    // Set max bytes.
    maxBytes := int64(1 << 16)
    config.TestingSetZoneConfig(1000, &config.ZoneConfig{RangeMaxBytes: maxBytes})
    // Trigger gossip callback.
    if err := store.Gossip().AddInfoProto(gossip.KeySystemConfig, &config.SystemConfig{}, 0); err != nil {
        t.Fatal(err)
    }
    // Verify that the range is split and the new range has the correct max bytes.
    util.SucceedsWithin(t, time.Second, func() error {
        newRng := store.LookupReplica(keys.MakeTablePrefix(1000), nil)
        if newRng.Desc().RangeID == origRng.Desc().RangeID {
            return util.Errorf("expected new range created by split")
        }
        if newRng.GetMaxBytes() != maxBytes {
            return util.Errorf("expected %d max bytes for the new range, but got %d",
                maxBytes, newRng.GetMaxBytes())
        }
        return nil
    })
}
Example 13: TestBuildInfo
func TestBuildInfo(t *testing.T) {
    if *numLocal == 0 {
        t.Skip("skipping since not run against local cluster")
    }
    l := cluster.CreateLocal(1, 1, *logDir, stopper) // intentionally using a local cluster
    l.Start()
    defer l.AssertAndStop(t)
    checkGossip(t, l, 20*time.Second, hasPeers(l.NumNodes()))
    util.SucceedsWithin(t, 10*time.Second, func() error {
        select {
        case <-stopper:
            t.Fatalf("interrupted")
            return nil
        case <-time.After(200 * time.Millisecond):
        }
        var r struct {
            BuildInfo map[string]string
        }
        if err := l.Nodes[0].GetJSON("", "/_status/details/local", &r); err != nil {
            return err
        }
        for _, key := range []string{"goVersion", "tag", "time", "dependencies"} {
            if val, ok := r.BuildInfo[key]; !ok {
                t.Errorf("build info missing for \"%s\"", key)
            } else if val == "" {
                t.Errorf("build info not set for \"%s\"", key)
            }
        }
        return nil
    })
}
Example 14: checkGossip
// checkGossip fetches the gossip infoStore from each node and invokes the given
// function. The test passes if the function returns nil for every node,
// retrying for up to the given duration.
func checkGossip(t *testing.T, c cluster.Cluster, d time.Duration,
    f checkGossipFunc) {
    util.SucceedsWithin(t, d, func() error {
        select {
        case <-stopper:
            t.Fatalf("interrupted")
            return nil
        case <-time.After(1 * time.Second):
        }
        for i := 0; i < c.NumNodes(); i++ {
            var m map[string]interface{}
            if err := getJSON(c.URL(i), "/_status/gossip/local", &m); err != nil {
                return err
            }
            infos, ok := m["infos"].(map[string]interface{})
            if !ok {
                return errors.New("no infos yet")
            }
            if err := f(infos); err != nil {
                return util.Errorf("node %d: %s", i, err)
            }
        }
        return nil
    })
}
Example 15: TestScannerTiming
// TestScannerTiming verifies that ranges are scanned, regardless
// of how many, to match scanInterval.
func TestScannerTiming(t *testing.T) {
    defer leaktest.AfterTest(t)
    const count = 3
    const runTime = 100 * time.Millisecond
    const maxError = 7500 * time.Microsecond
    durations := []time.Duration{
        15 * time.Millisecond,
        25 * time.Millisecond,
    }
    for i, duration := range durations {
        util.SucceedsWithin(t, 10*time.Second, func() error {
            ranges := newTestRangeSet(count, t)
            q := &testQueue{}
            s := newReplicaScanner(duration, 0, ranges)
            s.AddQueues(q)
            mc := hlc.NewManualClock(0)
            clock := hlc.NewClock(mc.UnixNano)
            stopper := stop.NewStopper()
            s.Start(clock, stopper)
            time.Sleep(runTime)
            stopper.Stop()
            avg := s.avgScan()
            log.Infof("%d: average scan: %s", i, avg)
            if avg.Nanoseconds()-duration.Nanoseconds() > maxError.Nanoseconds() ||
                duration.Nanoseconds()-avg.Nanoseconds() > maxError.Nanoseconds() {
                return util.Errorf("expected %s, got %s: exceeds max error of %s", duration, avg, maxError)
            }
            return nil
        })
    }
}