This article collects typical usage examples of the Golang function IsTrueWithin from github.com/cockroachdb/cockroach/util. If you are asking how IsTrueWithin is used in Go, or are looking for working examples of it, the curated code samples below may help.
A total of 15 code examples of the IsTrueWithin function are shown below, ordered by popularity by default.
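Judging from the examples that follow, util.IsTrueWithin takes a condition closure (func() bool) and a time.Duration, repeatedly evaluates the closure, and returns a non-nil error if the condition never becomes true before the duration elapses. The snippet below is only a minimal sketch of such a helper for orientation; the fixed 1ms poll interval, the function name isTrueWithin, and the error text are assumptions and do not reproduce the actual CockroachDB implementation.

package main

import (
	"fmt"
	"time"
)

// isTrueWithin is a hedged sketch of the helper these tests rely on: it
// polls cond until it returns true or the timeout elapses. The fixed 1ms
// poll interval and the error message are illustrative assumptions only.
func isTrueWithin(cond func() bool, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		if cond() {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("condition failed to evaluate true within %s", timeout)
		}
		time.Sleep(time.Millisecond)
	}
}

func main() {
	start := time.Now()
	// The condition becomes true after ~10ms, well inside the 50ms window.
	err := isTrueWithin(func() bool { return time.Since(start) > 10*time.Millisecond }, 50*time.Millisecond)
	fmt.Println(err) // <nil>
}

In the examples below, the closure typically inspects cluster state (queue counts, replica lookups, gossip contents, MVCC scans), and the timeouts range from tens of milliseconds to several seconds.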
Example 1: TestScannerAddToQueues
// TestScannerAddToQueues verifies that ranges are added to and
// removed from multiple queues.
func TestScannerAddToQueues(t *testing.T) {
const count = 3
iter := newTestIterator(count)
q1, q2 := &testQueue{}, &testQueue{}
s := newRangeScanner(1*time.Millisecond, iter)
s.AddQueues(q1, q2)
mc := hlc.NewManualClock(0)
clock := hlc.NewClock(mc.UnixNano)
stopper := util.NewStopper(0)
// Start the scanner and verify that all ranges are added to both queues.
s.Start(clock, stopper)
if err := util.IsTrueWithin(func() bool {
return q1.count() == count && q2.count() == count
}, 50*time.Millisecond); err != nil {
t.Error(err)
}
// Remove the first range and verify it no longer exists in either queue.
rng := iter.remove(0)
s.RemoveRange(rng)
if err := util.IsTrueWithin(func() bool {
return q1.count() == count-1 && q2.count() == count-1
}, 10*time.Millisecond); err != nil {
t.Error(err)
}
// Stop scanner and verify both queues are stopped.
stopper.Stop()
if !q1.isDone() || !q2.isDone() {
t.Errorf("expected all queues to stop; got %t, %t", q1.isDone(), q2.isDone())
}
}
Example 2: TestRangeSplitsWithWritePressure
// TestRangeSplitsWithWritePressure sets the zone config max bytes for
// a range to 256K and writes data until there are five ranges.
func TestRangeSplitsWithWritePressure(t *testing.T) {
defer leaktest.AfterTest(t)
s := createTestDB(t)
defer s.Stop()
setTestRetryOptions()
// Rewrite a zone config with low max bytes.
zoneConfig := &proto.ZoneConfig{
ReplicaAttrs: []proto.Attributes{
{},
{},
{},
},
RangeMinBytes: 1 << 8,
RangeMaxBytes: 1 << 18,
}
if err := s.DB.Put(keys.MakeKey(keys.ConfigZonePrefix, proto.KeyMin), zoneConfig); err != nil {
t.Fatal(err)
}
// Start a test writer writing about 32K per key so that only a few writes are needed to force a range split.
done := make(chan struct{})
var wg sync.WaitGroup
wg.Add(1)
go startTestWriter(s.DB, int64(0), 1<<15, &wg, nil, nil, done, t)
// Check that we split 5 times in allotted time.
if err := util.IsTrueWithin(func() bool {
// Scan the meta2 range descriptor records.
rows, err := s.DB.Scan(keys.Meta2Prefix, keys.MetaMax, 0)
if err != nil {
t.Fatalf("failed to scan meta2 keys: %s", err)
}
return len(rows) >= 5
}, 6*time.Second); err != nil {
t.Errorf("failed to split 5 times: %s", err)
}
close(done)
wg.Wait()
// This write pressure test often causes splits while resolve
// intents are in flight, causing them to fail with range key
// mismatch errors. However, LocalSender should retry in these
// cases. Check here via MVCC scan that there are no dangling write
// intents. We do this using an IsTrueWithin construct to account
// for timing of finishing the test writer and a possibly-ongoing
// asynchronous split.
if err := util.IsTrueWithin(func() bool {
if _, _, err := engine.MVCCScan(s.Eng, keys.LocalMax, proto.KeyMax, 0, proto.MaxTimestamp, true, nil); err != nil {
log.Infof("mvcc scan should be clean: %s", err)
return false
}
return true
}, 500*time.Millisecond); err != nil {
t.Error("failed to verify no dangling intents within 500ms")
}
}
Example 3: TestChangeReplicasDescriptorInvariant
// TestChangeReplicasDescriptorInvariant tests that a replica change aborts if
// another change has been made to the RangeDescriptor since it was initiated.
func TestChangeReplicasDescriptorInvariant(t *testing.T) {
defer leaktest.AfterTest(t)
mtc := startMultiTestContext(t, 3)
defer mtc.Stop()
repl, err := mtc.stores[0].GetReplica(1)
if err != nil {
t.Fatal(err)
}
addReplica := func(storeNum int, desc *roachpb.RangeDescriptor) error {
return repl.ChangeReplicas(roachpb.ADD_REPLICA,
roachpb.ReplicaDescriptor{
NodeID: mtc.stores[storeNum].Ident.NodeID,
StoreID: mtc.stores[storeNum].Ident.StoreID,
},
desc)
}
// Retain the descriptor for the range at this point.
origDesc := repl.Desc()
// Add replica to the second store, which should succeed.
if err := addReplica(1, origDesc); err != nil {
t.Fatal(err)
}
if err := util.IsTrueWithin(func() bool {
r := mtc.stores[1].LookupReplica(roachpb.RKey("a"), roachpb.RKey("b"))
if r == nil {
return false
}
return true
}, time.Second); err != nil {
t.Fatal(err)
}
// Attempt to add replica to the third store with the original descriptor.
// This should fail because the descriptor is stale.
if err := addReplica(2, origDesc); err == nil {
t.Fatal("Expected error calling ChangeReplicas with stale RangeDescriptor")
}
// Add to third store with fresh descriptor.
if err := addReplica(2, repl.Desc()); err != nil {
t.Fatal(err)
}
if err := util.IsTrueWithin(func() bool {
r := mtc.stores[2].LookupReplica(roachpb.RKey("a"), roachpb.RKey("b"))
if r == nil {
return false
}
return true
}, time.Second); err != nil {
t.Fatal(err)
}
}
Example 4: TestStoreZoneUpdateAndRangeSplit
// TestStoreZoneUpdateAndRangeSplit verifies that modifying the zone
// configuration changes range max bytes and Range.maybeSplit() takes
// max bytes into account when deciding whether to enqueue a range for
// splitting. It further verifies that the range is in fact split on
// exceeding zone's RangeMaxBytes.
func TestStoreZoneUpdateAndRangeSplit(t *testing.T) {
defer leaktest.AfterTest(t)
store, stopper := createTestStore(t)
config.TestingSetupZoneConfigHook(stopper)
defer stopper.Stop()
maxBytes := int64(1 << 16)
// Set max bytes.
config.TestingSetZoneConfig(1000, &config.ZoneConfig{RangeMaxBytes: maxBytes})
// Trigger gossip callback.
if err := store.Gossip().AddInfoProto(gossip.KeySystemConfig, &config.SystemConfig{}, 0); err != nil {
t.Fatal(err)
}
// Wait for the range to be split along table boundaries.
originalRange := store.LookupReplica(roachpb.RKeyMin, nil)
var rng *storage.Replica
if err := util.IsTrueWithin(func() bool {
rng = store.LookupReplica(keys.MakeTablePrefix(1000), nil)
return rng.Desc().RangeID != originalRange.Desc().RangeID
}, time.Second); err != nil {
t.Fatalf("failed to notice range max bytes update: %s", err)
}
// Check range's max bytes settings.
if rng.GetMaxBytes() != maxBytes {
t.Fatalf("range max bytes mismatch, got: %d, expected: %d", rng.GetMaxBytes(), maxBytes)
}
// Make sure the second range goes to the end.
if !roachpb.RKeyMax.Equal(rng.Desc().EndKey) {
t.Fatalf("second range has split: %+v", rng.Desc())
}
// Fill the range at the table prefix we're writing to until it exceeds its max bytes.
fillRange(store, rng.Desc().RangeID, keys.MakeTablePrefix(1000), maxBytes, t)
// Verify that the range is in fact split (give it a few seconds for very
// slow test machines).
var newRng *storage.Replica
if err := util.IsTrueWithin(func() bool {
newRng = store.LookupReplica(keys.MakeTablePrefix(2000), nil)
return newRng.Desc().RangeID != rng.Desc().RangeID
}, 5*time.Second); err != nil {
t.Errorf("expected range to split within 1s")
}
// Make sure the new range goes to the end.
if !roachpb.RKeyMax.Equal(newRng.Desc().EndKey) {
t.Fatalf("second range has split: %+v", rng.Desc())
}
}
Example 5: TestNodeJoin
// TestNodeJoin verifies a new node is able to join a bootstrapped
// cluster consisting of one node.
func TestNodeJoin(t *testing.T) {
defer leaktest.AfterTest(t)
engineStopper := stop.NewStopper()
defer engineStopper.Stop()
e := engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)
stopper := stop.NewStopper()
_, err := BootstrapCluster("cluster-1", []engine.Engine{e}, stopper)
if err != nil {
t.Fatal(err)
}
stopper.Stop()
// Set an aggressive gossip interval to make sure information is exchanged right away.
testContext.GossipInterval = gossip.TestInterval
// Start the bootstrap node.
engines1 := []engine.Engine{e}
addr1 := util.CreateTestAddr("tcp")
server1, node1, stopper1 := createAndStartTestNode(addr1, engines1, addr1, t)
defer stopper1.Stop()
// Create a new node.
engines2 := []engine.Engine{engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)}
server2, node2, stopper2 := createAndStartTestNode(util.CreateTestAddr("tcp"), engines2, server1.Addr(), t)
defer stopper2.Stop()
// Verify new node is able to bootstrap its store.
if err := util.IsTrueWithin(func() bool { return node2.lSender.GetStoreCount() == 1 }, 50*time.Millisecond); err != nil {
t.Fatal(err)
}
// Verify node1 sees node2 via gossip and vice versa.
node1Key := gossip.MakeNodeIDKey(node1.Descriptor.NodeID)
node2Key := gossip.MakeNodeIDKey(node2.Descriptor.NodeID)
if err := util.IsTrueWithin(func() bool {
nodeDesc1 := &roachpb.NodeDescriptor{}
if err := node1.ctx.Gossip.GetInfoProto(node2Key, nodeDesc1); err != nil {
return false
}
if addr2 := nodeDesc1.Address.AddressField; addr2 != server2.Addr().String() {
t.Errorf("addr2 gossip %s doesn't match addr2 address %s", addr2, server2.Addr().String())
}
nodeDesc2 := &roachpb.NodeDescriptor{}
if err := node2.ctx.Gossip.GetInfoProto(node1Key, nodeDesc2); err != nil {
return false
}
if addr1 := nodeDesc2.Address.AddressField; addr1 != server1.Addr().String() {
t.Errorf("addr1 gossip %s doesn't match addr1 address %s", addr1, server1.Addr().String())
}
return true
}, 50*time.Millisecond); err != nil {
t.Error(err)
}
}
Example 6: TestRangeSplitsWithWritePressure
// TestRangeSplitsWithWritePressure sets the zone config max bytes for
// a range to 256K and writes data until there are five ranges.
func TestRangeSplitsWithWritePressure(t *testing.T) {
defer leaktest.AfterTest(t)
// Override default zone config.
previousMaxBytes := config.DefaultZoneConfig.RangeMaxBytes
config.DefaultZoneConfig.RangeMaxBytes = 1 << 18
defer func() { config.DefaultZoneConfig.RangeMaxBytes = previousMaxBytes }()
s := createTestDB(t)
// This is purely to silence log spam.
config.TestingSetupZoneConfigHook(s.Stopper)
defer s.Stop()
setTestRetryOptions()
// Start a test writer writing about 32K per key so that only a few writes are needed to force a range split.
done := make(chan struct{})
var wg sync.WaitGroup
wg.Add(1)
go startTestWriter(s.DB, int64(0), 1<<15, &wg, nil, nil, done, t)
// Check that we split 5 times in allotted time.
if err := util.IsTrueWithin(func() bool {
// Scan the meta2 range descriptor records.
rows, err := s.DB.Scan(keys.Meta2Prefix, keys.MetaMax, 0)
if err != nil {
t.Fatalf("failed to scan meta2 keys: %s", err)
}
return len(rows) >= 5
}, 6*time.Second); err != nil {
t.Errorf("failed to split 5 times: %s", err)
}
close(done)
wg.Wait()
// This write pressure test often causes splits while resolve
// intents are in flight, causing them to fail with range key
// mismatch errors. However, LocalSender should retry in these
// cases. Check here via MVCC scan that there are no dangling write
// intents. We do this using an IsTrueWithin construct to account
// for timing of finishing the test writer and a possibly-ongoing
// asynchronous split.
if err := util.IsTrueWithin(func() bool {
if _, _, err := engine.MVCCScan(s.Eng, keys.LocalMax, roachpb.KeyMax, 0, roachpb.MaxTimestamp, true, nil); err != nil {
log.Infof("mvcc scan should be clean: %s", err)
return false
}
return true
}, cleanMVCCScanTimeout); err != nil {
t.Error("failed to verify no dangling intents within 500ms")
}
}
Example 7: TestCoordinatorHeartbeat
// TestCoordinatorHeartbeat verifies periodic heartbeat of the
// transaction record.
func TestCoordinatorHeartbeat(t *testing.T) {
db, _, manual := createTestDB(t)
defer db.Close()
// Set heartbeat interval to 1ms for testing.
db.coordinator.heartbeatInterval = 1 * time.Millisecond
txnID := engine.Key("txn")
<-db.Put(createPutRequest(engine.Key("a"), []byte("value"), txnID))
// Verify 3 heartbeats.
var heartbeatTS proto.Timestamp
for i := 0; i < 3; i++ {
if err := util.IsTrueWithin(func() bool {
ok, txn, err := getTxn(db, engine.MakeKey(engine.KeyLocalTransactionPrefix, txnID))
if !ok || err != nil {
return false
}
// Advance clock by 1ns.
// Locking the coordinator to prevent a data race.
db.coordinator.Lock()
*manual = hlc.ManualClock(*manual + 1)
db.coordinator.Unlock()
if heartbeatTS.Less(*txn.LastHeartbeat) {
heartbeatTS = *txn.LastHeartbeat
return true
}
return false
}, 50*time.Millisecond); err != nil {
t.Error("expected initial heartbeat within 50ms")
}
}
}
Example 8: getLatestConfig
func getLatestConfig(s *server.TestServer, expected int) (cfg *config.SystemConfig, err error) {
err = util.IsTrueWithin(func() bool {
cfg = s.Gossip().GetSystemConfig()
return cfg != nil && len(cfg.Values) == expected
}, 500*time.Millisecond)
return
}
Example 9: TestClientGossip
// TestClientGossip verifies a client can gossip a delta to the server.
func TestClientGossip(t *testing.T) {
local, remote, lserver, rserver := startGossip(t)
local.AddInfo("local-key", "local value", time.Second)
remote.AddInfo("remote-key", "remote value", time.Second)
disconnected := make(chan *client, 1)
client := newClient(remote.is.NodeAddr)
go client.start(local, disconnected)
if err := util.IsTrueWithin(func() bool {
_, lerr := remote.GetInfo("local-key")
_, rerr := local.GetInfo("remote-key")
return lerr == nil && rerr == nil
}, 500*time.Millisecond); err != nil {
t.Errorf("gossip exchange failed or taking too long")
}
remote.stop()
local.stop()
lserver.Close()
rserver.Close()
log.Info("done serving")
if client != <-disconnected {
t.Errorf("expected client disconnect after remote close")
}
}
Example 10: TestCoordinatorGC
// TestCoordinatorGC verifies that the coordinator cleans up extant
// transactions after the lastUpdateTS exceeds the timeout.
func TestCoordinatorGC(t *testing.T) {
db, _, manual := createTestDB(t)
defer db.Close()
// Set heartbeat interval to 1ms for testing.
db.coordinator.heartbeatInterval = 1 * time.Millisecond
txnID := engine.Key("txn")
<-db.Put(createPutRequest(engine.Key("a"), []byte("value"), txnID))
// Now, advance clock past the default client timeout.
// Locking the coordinator to prevent a data race.
db.coordinator.Lock()
*manual = hlc.ManualClock(defaultClientTimeout.Nanoseconds() + 1)
db.coordinator.Unlock()
if err := util.IsTrueWithin(func() bool {
// Locking the coordinator to prevent a data race.
db.coordinator.Lock()
_, ok := db.coordinator.txns[string(txnID)]
db.coordinator.Unlock()
return !ok
}, 50*time.Millisecond); err != nil {
t.Error("expected garbage collection")
}
}
Example 11: TestStoreRangeReplicate
// TestStoreRangeReplicate verifies that the replication queue will notice
// under-replicated ranges and replicate them.
func TestStoreRangeReplicate(t *testing.T) {
defer leaktest.AfterTest(t)
mtc := multiTestContext{}
mtc.Start(t, 3)
defer mtc.Stop()
// Initialize the gossip network.
var wg sync.WaitGroup
wg.Add(len(mtc.stores))
key := gossip.MakePrefixPattern(gossip.KeyCapacityPrefix)
mtc.stores[0].Gossip().RegisterCallback(key, func(_ string, _ bool) { wg.Done() })
for _, s := range mtc.stores {
s.GossipCapacity()
}
wg.Wait()
// Once we know our peers, trigger a scan.
mtc.stores[0].ForceReplicationScan(t)
// The range should become available on every node.
if err := util.IsTrueWithin(func() bool {
for _, s := range mtc.stores {
r := s.LookupRange(proto.Key("a"), proto.Key("b"))
if r == nil {
return false
}
}
return true
}, 1*time.Second); err != nil {
t.Fatal(err)
}
}
Example 12: TestTxnCoordSenderGC
// TestTxnCoordSenderGC verifies that the coordinator cleans up extant
// transactions after the lastUpdateNanos exceeds the timeout.
func TestTxnCoordSenderGC(t *testing.T) {
defer leaktest.AfterTest(t)
s := createTestDB(t)
defer s.Stop()
// Set heartbeat interval to 1ms for testing.
s.Sender.heartbeatInterval = 1 * time.Millisecond
txn := newTxn(s.Clock, proto.Key("a"))
call := proto.Call{
Args: createPutRequest(proto.Key("a"), []byte("value"), txn),
Reply: &proto.PutResponse{},
}
if err := sendCall(s.Sender, call); err != nil {
t.Fatal(err)
}
// Now, advance clock past the default client timeout.
// Locking the TxnCoordSender to prevent a data race.
s.Sender.Lock()
s.Manual.Set(defaultClientTimeout.Nanoseconds() + 1)
s.Sender.Unlock()
if err := util.IsTrueWithin(func() bool {
// Locking the TxnCoordSender to prevent a data race.
s.Sender.Lock()
_, ok := s.Sender.txns[string(txn.ID)]
s.Sender.Unlock()
return !ok
}, 50*time.Millisecond); err != nil {
t.Error("expected garbage collection")
}
}
Example 13: TestTxnCoordSenderGC
// TestTxnCoordSenderGC verifies that the coordinator cleans up extant
// transactions after the lastUpdateNanos exceeds the timeout.
func TestTxnCoordSenderGC(t *testing.T) {
defer leaktest.AfterTest(t)
s := createTestDB(t)
defer s.Stop()
// Set heartbeat interval to 1ms for testing.
s.Sender.heartbeatInterval = 1 * time.Millisecond
txn := client.NewTxn(*s.DB)
if pErr := txn.Put(roachpb.Key("a"), []byte("value")); pErr != nil {
t.Fatal(pErr)
}
// Now, advance clock past the default client timeout.
// Locking the TxnCoordSender to prevent a data race.
s.Sender.Lock()
s.Manual.Set(defaultClientTimeout.Nanoseconds() + 1)
s.Sender.Unlock()
if err := util.IsTrueWithin(func() bool {
// Locking the TxnCoordSender to prevent a data race.
s.Sender.Lock()
_, ok := s.Sender.txns[string(txn.Proto.ID)]
s.Sender.Unlock()
return !ok
}, 50*time.Millisecond); err != nil {
t.Error("expected garbage collection")
}
}
Example 14: TestStoreRangeUpReplicate
// TestStoreRangeUpReplicate verifies that the replication queue will notice
// under-replicated ranges and replicate them.
func TestStoreRangeUpReplicate(t *testing.T) {
defer leaktest.AfterTest(t)
mtc := startMultiTestContext(t, 3)
defer mtc.Stop()
// Initialize the gossip network.
var wg sync.WaitGroup
wg.Add(len(mtc.stores))
key := gossip.MakePrefixPattern(gossip.KeyStorePrefix)
mtc.stores[0].Gossip().RegisterCallback(key, func(_ string, _ roachpb.Value) { wg.Done() })
for _, s := range mtc.stores {
s.GossipStore()
}
wg.Wait()
// Once we know our peers, trigger a scan.
mtc.stores[0].ForceReplicationScanAndProcess()
// The range should become available on every node.
if err := util.IsTrueWithin(func() bool {
for _, s := range mtc.stores {
r := s.LookupReplica(roachpb.RKey("a"), roachpb.RKey("b"))
if r == nil {
return false
}
}
return true
}, replicationTimeout); err != nil {
t.Fatal(err)
}
}
Example 15: gossipForTest
func gossipForTest(t *testing.T) (*gossip.Gossip, *stop.Stopper) {
stopper := stop.NewStopper()
// Setup fake zone config handler.
config.TestingSetupZoneConfigHook(stopper)
rpcContext := rpc.NewContext(&base.Context{}, hlc.NewClock(hlc.UnixNano), stopper)
g := gossip.New(rpcContext, gossip.TestBootstrap)
// g.SetNodeID has to be called before calling g.AddInfo.
g.SetNodeID(roachpb.NodeID(1))
// Put an empty system config into gossip.
if err := g.AddInfoProto(gossip.KeySystemConfig,
&config.SystemConfig{}, 0); err != nil {
t.Fatal(err)
}
// Wait for SystemConfig.
if err := util.IsTrueWithin(func() bool {
return g.GetSystemConfig() != nil
}, 100*time.Millisecond); err != nil {
t.Fatal(err)
}
return g, stopper
}