This article collects typical usage examples of the Golang TestStoreConfig function from github.com/cockroachdb/cockroach/pkg/storage. If you are unsure how TestStoreConfig is used, or are looking for concrete examples of calling it, the curated code samples below may help.
The sections that follow show 15 code examples of the TestStoreConfig function, sorted by popularity by default.
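Before the full examples, here is a minimal sketch of the call pattern they all share: build a test store configuration with storage.TestStoreConfig (passing nil for a default clock, or an hlc.Clock backed by a manual clock when the test needs to control time), adjust TestingKnobs, and hand the config to a test-store helper. Note that createTestStoreWithConfig is a helper defined in CockroachDB's own test files and its signature has changed between versions, so treat this as an illustrative sketch of the pattern in the examples below rather than a definitive API reference.
package storage_test

import (
	"testing"
	"time"

	"github.com/cockroachdb/cockroach/pkg/storage"
	"github.com/cockroachdb/cockroach/pkg/util/hlc"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
)

// TestStoreConfigSketch demonstrates the two common ways of constructing a
// test StoreConfig seen in the examples below. It relies on the
// createTestStoreWithConfig helper from the storage client tests, whose exact
// signature varies across CockroachDB versions.
func TestStoreConfigSketch(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// Variant 1: let TestStoreConfig pick a default clock by passing nil.
	cfg := storage.TestStoreConfig(nil)
	// Testing knobs are typically tweaked before the store is created.
	cfg.TestingKnobs.DisableSplitQueue = true

	// Variant 2: supply a clock driven by a manual clock so the test can
	// advance time deterministically.
	manual := hlc.NewManualClock(123)
	cfgManual := storage.TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond))
	_ = cfgManual

	// Hand the configuration to a test-store helper and remember to stop it.
	store, stopper := createTestStoreWithConfig(t, cfg)
	defer stopper.Stop()
	_ = store
}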
Example 1: TestStoreRangeMergeStats
// TestStoreRangeMergeStats starts by splitting a range, then writing random data
// to both sides of the split. It then merges the ranges and verifies the merged
// range has stats consistent with recomputations.
func TestStoreRangeMergeStats(t *testing.T) {
defer leaktest.AfterTest(t)()
manual := hlc.NewManualClock(123)
storeCfg := storage.TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond))
storeCfg.TestingKnobs.DisableSplitQueue = true
store, stopper := createTestStoreWithConfig(t, storeCfg)
defer stopper.Stop()
// Split the range.
aDesc, bDesc, pErr := createSplitRanges(store)
if pErr != nil {
t.Fatal(pErr)
}
// Write some values left and right of the proposed split key.
writeRandomDataToRange(t, store, aDesc.RangeID, []byte("aaa"))
writeRandomDataToRange(t, store, bDesc.RangeID, []byte("ccc"))
// Get the range stats for both ranges now that we have data.
snap := store.Engine().NewSnapshot()
defer snap.Close()
msA, err := engine.MVCCGetRangeStats(context.Background(), snap, aDesc.RangeID)
if err != nil {
t.Fatal(err)
}
msB, err := engine.MVCCGetRangeStats(context.Background(), snap, bDesc.RangeID)
if err != nil {
t.Fatal(err)
}
// Stats should agree with recomputation.
if err := verifyRecomputedStats(snap, aDesc, msA, manual.UnixNano()); err != nil {
t.Fatalf("failed to verify range A's stats before split: %v", err)
}
if err := verifyRecomputedStats(snap, bDesc, msB, manual.UnixNano()); err != nil {
t.Fatalf("failed to verify range B's stats before split: %v", err)
}
manual.Increment(100)
// Merge the b range back into the a range.
args := adminMergeArgs(roachpb.KeyMin)
if _, err := client.SendWrapped(context.Background(), rg1(store), &args); err != nil {
t.Fatal(err)
}
replMerged := store.LookupReplica(aDesc.StartKey, nil)
// Get the range stats for the merged range and verify.
snap = store.Engine().NewSnapshot()
defer snap.Close()
msMerged, err := engine.MVCCGetRangeStats(context.Background(), snap, replMerged.RangeID)
if err != nil {
t.Fatal(err)
}
// Merged stats should agree with recomputation.
if err := verifyRecomputedStats(snap, replMerged.Desc(), msMerged, manual.UnixNano()); err != nil {
t.Errorf("failed to verify range's stats after merge: %v", err)
}
}
Example 2: TestStoreRangeMergeTwoEmptyRanges
// TestStoreRangeMergeTwoEmptyRanges tries to merge two empty ranges together.
func TestStoreRangeMergeTwoEmptyRanges(t *testing.T) {
defer leaktest.AfterTest(t)()
storeCfg := storage.TestStoreConfig(nil)
storeCfg.TestingKnobs.DisableSplitQueue = true
store, stopper := createTestStoreWithConfig(t, storeCfg)
defer stopper.Stop()
if _, _, err := createSplitRanges(store); err != nil {
t.Fatal(err)
}
// Merge the b range back into the a range.
args := adminMergeArgs(roachpb.KeyMin)
_, err := client.SendWrapped(context.Background(), rg1(store), &args)
if err != nil {
t.Fatal(err)
}
// Verify the merge by looking up keys from both ranges.
replicaA := store.LookupReplica([]byte("a"), nil)
replicaB := store.LookupReplica([]byte("c"), nil)
if !reflect.DeepEqual(replicaA, replicaB) {
t.Fatalf("ranges were not merged %s!=%s", replicaA, replicaB)
}
}
Example 3: TestConsistencyQueueRequiresLive
// TestConsistencyQueueRequiresLive verifies the queue will not
// process ranges whose replicas are not all live.
func TestConsistencyQueueRequiresLive(t *testing.T) {
defer leaktest.AfterTest(t)()
sc := storage.TestStoreConfig(nil)
mtc := &multiTestContext{storeConfig: &sc}
defer mtc.Stop()
mtc.Start(t, 3)
// Replicate the range to three nodes.
repl := mtc.stores[0].LookupReplica(roachpb.RKeyMin, nil)
rangeID := repl.RangeID
mtc.replicateRange(rangeID, 1, 2)
// Verify that queueing is immediately possible.
if shouldQ, priority := mtc.stores[0].ConsistencyQueueShouldQueue(
context.TODO(), mtc.clock.Now(), repl, config.SystemConfig{}); !shouldQ {
t.Fatalf("expected shouldQ true; got %t, %f", shouldQ, priority)
}
// Stop a node and expire leases.
mtc.stopStore(2)
mtc.expireLeases(context.TODO())
if shouldQ, priority := mtc.stores[0].ConsistencyQueueShouldQueue(
context.TODO(), mtc.clock.Now(), repl, config.SystemConfig{}); shouldQ {
t.Fatalf("expected shouldQ false; got %t, %f", shouldQ, priority)
}
}
Example 4: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(
addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T,
) (*grpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
cfg := storage.TestStoreConfig(nil)
stopper := stop.NewStopper()
nodeRPCContext := rpc.NewContext(log.AmbientContext{}, nodeTestBaseContext, cfg.Clock, stopper)
cfg.ScanInterval = 10 * time.Hour
cfg.ConsistencyCheckInterval = 10 * time.Hour
grpcServer := rpc.NewServer(nodeRPCContext)
serverCfg := makeTestConfig()
cfg.Gossip = gossip.NewTest(
0,
nodeRPCContext,
grpcServer,
serverCfg.GossipBootstrapResolvers,
stopper,
metric.NewRegistry(),
)
ln, err := netutil.ListenAndServeGRPC(stopper, grpcServer, addr)
if err != nil {
t.Fatal(err)
}
if gossipBS != nil {
// Handle possibility of a :0 port specification.
if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
gossipBS = ln.Addr()
}
r, err := resolver.NewResolverFromAddress(gossipBS)
if err != nil {
t.Fatalf("bad gossip address %s: %s", gossipBS, err)
}
cfg.Gossip.SetResolvers([]resolver.Resolver{r})
cfg.Gossip.Start(ln.Addr())
}
retryOpts := base.DefaultRetryOptions()
retryOpts.Closer = stopper.ShouldQuiesce()
distSender := kv.NewDistSender(kv.DistSenderConfig{
Clock: cfg.Clock,
RPCContext: nodeRPCContext,
RPCRetryOptions: &retryOpts,
}, cfg.Gossip)
cfg.AmbientCtx.Tracer = tracing.NewTracer()
sender := kv.NewTxnCoordSender(
cfg.AmbientCtx,
distSender,
cfg.Clock,
false,
stopper,
kv.MakeTxnMetrics(metric.TestSampleInterval),
)
cfg.DB = client.NewDB(sender)
cfg.Transport = storage.NewDummyRaftTransport()
cfg.MetricsSampleInterval = metric.TestSampleInterval
node := NewNode(cfg, status.NewMetricsRecorder(cfg.Clock), metric.NewRegistry(), stopper,
kv.MakeTxnMetrics(metric.TestSampleInterval), sql.MakeEventLogger(nil))
roachpb.RegisterInternalServer(grpcServer, node)
return grpcServer, ln.Addr(), cfg.Clock, node, stopper
}
Example 5: Start
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester, baseCtx *base.Config, initSender InitSenderFn) {
ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
nc := &base.NodeIDContainer{}
ambient.AddLogTag("n", nc)
nodeID := roachpb.NodeID(1)
nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}
ltc.tester = t
ltc.Manual = hlc.NewManualClock(0)
ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
ltc.Stopper = stop.NewStopper()
rpcContext := rpc.NewContext(ambient, baseCtx, ltc.Clock, ltc.Stopper)
server := rpc.NewServer(rpcContext) // never started
ltc.Gossip = gossip.New(ambient, nc, rpcContext, server, nil, ltc.Stopper, metric.NewRegistry())
ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20)
ltc.Stopper.AddCloser(ltc.Eng)
ltc.Stores = storage.NewStores(ambient, ltc.Clock)
ltc.Sender = initSender(nodeDesc, ambient.Tracer, ltc.Clock, ltc.Latency, ltc.Stores, ltc.Stopper,
ltc.Gossip)
if ltc.DBContext == nil {
dbCtx := client.DefaultDBContext()
ltc.DBContext = &dbCtx
}
ltc.DB = client.NewDBWithContext(ltc.Sender, *ltc.DBContext)
transport := storage.NewDummyRaftTransport()
cfg := storage.TestStoreConfig(nil)
if ltc.RangeRetryOptions != nil {
cfg.RangeRetryOptions = *ltc.RangeRetryOptions
}
cfg.AmbientCtx = ambient
cfg.Clock = ltc.Clock
cfg.DB = ltc.DB
cfg.Gossip = ltc.Gossip
cfg.Transport = transport
cfg.MetricsSampleInterval = metric.TestSampleInterval
ltc.Store = storage.NewStore(cfg, ltc.Eng, nodeDesc)
if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}); err != nil {
t.Fatalf("unable to start local test cluster: %s", err)
}
ltc.Stores.AddStore(ltc.Store)
if err := ltc.Store.BootstrapRange(nil); err != nil {
t.Fatalf("unable to start local test cluster: %s", err)
}
if err := ltc.Store.Start(context.Background(), ltc.Stopper); err != nil {
t.Fatalf("unable to start local test cluster: %s", err)
}
nc.Set(context.TODO(), nodeDesc.NodeID)
if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
t.Fatalf("unable to set node descriptor: %s", err)
}
}
Example 6: TestLeaseNotUsedAfterRestart
// Test that leases held before a restart are not used after the restart.
// See replica.mu.minLeaseProposedTS for the reasons why this isn't allowed.
func TestLeaseNotUsedAfterRestart(t *testing.T) {
defer leaktest.AfterTest(t)()
sc := storage.TestStoreConfig(nil)
var leaseAcquisitionTrap atomic.Value
// Disable the split queue so that no ranges are split. This makes it easy
// below to trap any lease request and infer that it refers to the range we're
// interested in.
sc.TestingKnobs.DisableSplitQueue = true
sc.TestingKnobs.LeaseRequestEvent = func(ts hlc.Timestamp) {
val := leaseAcquisitionTrap.Load()
if val == nil {
return
}
trapCallback := val.(func(ts hlc.Timestamp))
if trapCallback != nil {
trapCallback(ts)
}
}
mtc := &multiTestContext{storeConfig: &sc}
mtc.Start(t, 1)
defer mtc.Stop()
// Send a read, to acquire a lease.
getArgs := getArgs([]byte("a"))
if _, err := client.SendWrapped(context.Background(), rg1(mtc.stores[0]), &getArgs); err != nil {
t.Fatal(err)
}
// Restart the mtc. Before we do that, we're installing a callback used to
// assert that a new lease has been requested. The callback is installed
// before the restart, as the lease might be requested at any time and for
// many reasons by background processes, even before we send the read below.
leaseAcquisitionCh := make(chan error)
var once sync.Once
leaseAcquisitionTrap.Store(func(_ hlc.Timestamp) {
once.Do(func() {
close(leaseAcquisitionCh)
})
})
mtc.restart()
// Send another read and check that the pre-existing lease has not been used.
// Concretely, we check that a new lease is requested.
if _, err := client.SendWrapped(context.Background(), rg1(mtc.stores[0]), &getArgs); err != nil {
t.Fatal(err)
}
// Check that the Send above triggered a lease acquisition.
select {
case <-leaseAcquisitionCh:
case <-time.After(time.Second):
t.Fatalf("read did not acquire a new lease")
}
}
Example 7: TestReplicaGCQueueDropReplicaDirect
// TestReplicaGCQueueDropReplicaDirect verifies that a removed replica is
// immediately cleaned up.
func TestReplicaGCQueueDropReplicaDirect(t *testing.T) {
defer leaktest.AfterTest(t)()
mtc := &multiTestContext{}
const numStores = 3
rangeID := roachpb.RangeID(1)
// In this test, the Replica on the second Node is removed, and the test
// verifies that that Node adds this Replica to its RangeGCQueue. However,
// the queue does a consistent lookup which will usually be read from
// Node 1. Hence, if Node 1 hasn't processed the removal when Node 2 has,
// no GC will take place since the consistent RangeLookup hits the first
// Node. We use the TestingCommandFilter to make sure that the second Node
// waits for the first.
cfg := storage.TestStoreConfig(nil)
mtc.storeConfig = &cfg
mtc.storeConfig.TestingKnobs.TestingCommandFilter =
func(filterArgs storagebase.FilterArgs) *roachpb.Error {
et, ok := filterArgs.Req.(*roachpb.EndTransactionRequest)
if !ok || filterArgs.Sid != 2 {
return nil
}
crt := et.InternalCommitTrigger.GetChangeReplicasTrigger()
if crt == nil || crt.ChangeType != roachpb.REMOVE_REPLICA {
return nil
}
testutils.SucceedsSoon(t, func() error {
r, err := mtc.stores[0].GetReplica(rangeID)
if err != nil {
return err
}
if _, ok := r.Desc().GetReplicaDescriptor(2); ok {
return errors.New("expected second node gone from first node's known replicas")
}
return nil
})
return nil
}
defer mtc.Stop()
mtc.Start(t, numStores)
mtc.replicateRange(rangeID, 1, 2)
mtc.unreplicateRange(rangeID, 1)
// Make sure the range is removed from the store.
testutils.SucceedsSoon(t, func() error {
if _, err := mtc.stores[1].GetReplica(rangeID); !testutils.IsError(err, "range .* was not found") {
return errors.Errorf("expected range removal: %v", err) // NB: errors.Wrapf(nil, ...) returns nil.
}
return nil
})
}
Example 8: TestStoreRangeMergeLastRange
// TestStoreRangeMergeLastRange verifies that merging the last range
// fails.
func TestStoreRangeMergeLastRange(t *testing.T) {
defer leaktest.AfterTest(t)()
storeCfg := storage.TestStoreConfig(nil)
storeCfg.TestingKnobs.DisableSplitQueue = true
store, stopper := createTestStoreWithConfig(t, storeCfg)
defer stopper.Stop()
// Merge last range.
args := adminMergeArgs(roachpb.KeyMin)
if _, pErr := client.SendWrapped(context.Background(), rg1(store), &args); !testutils.IsPError(pErr, "cannot merge final range") {
t.Fatalf("expected 'cannot merge final range' error; got %s", pErr)
}
}
Example 9: BenchmarkStoreRangeMerge
func BenchmarkStoreRangeMerge(b *testing.B) {
defer tracing.Disable()()
storeCfg := storage.TestStoreConfig(nil)
storeCfg.TestingKnobs.DisableSplitQueue = true
stopper := stop.NewStopper()
defer stopper.Stop()
store := createTestStoreWithConfig(b, stopper, storeCfg)
// Perform initial split of ranges.
sArgs := adminSplitArgs(roachpb.KeyMin, []byte("b"))
if _, err := client.SendWrapped(context.Background(), rg1(store), sArgs); err != nil {
b.Fatal(err)
}
// Write some values left and right of the proposed split key.
aDesc := store.LookupReplica([]byte("a"), nil).Desc()
bDesc := store.LookupReplica([]byte("c"), nil).Desc()
writeRandomDataToRange(b, store, aDesc.RangeID, []byte("aaa"))
writeRandomDataToRange(b, store, bDesc.RangeID, []byte("ccc"))
// Create args to merge the b range back into the a range.
mArgs := adminMergeArgs(roachpb.KeyMin)
b.ResetTimer()
for i := 0; i < b.N; i++ {
// Merge the ranges.
b.StartTimer()
if _, err := client.SendWrapped(context.Background(), rg1(store), mArgs); err != nil {
b.Fatal(err)
}
// Split the range.
b.StopTimer()
if _, err := client.SendWrapped(context.Background(), rg1(store), sArgs); err != nil {
b.Fatal(err)
}
}
}
Example 10: TestStoreRangeMergeMetadataCleanup
// TestStoreRangeMergeMetadataCleanup tests that all metadata of a
// subsumed range is cleaned up on merge.
func TestStoreRangeMergeMetadataCleanup(t *testing.T) {
defer leaktest.AfterTest(t)()
storeCfg := storage.TestStoreConfig(nil)
storeCfg.TestingKnobs.DisableSplitQueue = true
store, stopper := createTestStoreWithConfig(t, storeCfg)
defer stopper.Stop()
scan := func(f func(roachpb.KeyValue) (bool, error)) {
if _, err := engine.MVCCIterate(context.Background(), store.Engine(), roachpb.KeyMin, roachpb.KeyMax, hlc.ZeroTimestamp, true, nil, false, f); err != nil {
t.Fatal(err)
}
}
content := roachpb.Key("testing!")
// Write some values left of the proposed split key.
pArgs := putArgs([]byte("aaa"), content)
if _, err := client.SendWrapped(context.Background(), rg1(store), &pArgs); err != nil {
t.Fatal(err)
}
// Collect all the keys.
preKeys := make(map[string]struct{})
scan(func(kv roachpb.KeyValue) (bool, error) {
preKeys[string(kv.Key)] = struct{}{}
return false, nil
})
// Split the range.
_, bDesc, err := createSplitRanges(store)
if err != nil {
t.Fatal(err)
}
// Write some values right of the split key.
pArgs = putArgs([]byte("ccc"), content)
if _, err := client.SendWrappedWith(context.Background(), rg1(store), roachpb.Header{
RangeID: bDesc.RangeID,
}, &pArgs); err != nil {
t.Fatal(err)
}
// Merge the b range back into the a range.
args := adminMergeArgs(roachpb.KeyMin)
if _, err := client.SendWrapped(context.Background(), rg1(store), &args); err != nil {
t.Fatal(err)
}
// Collect all the keys again.
postKeys := make(map[string]struct{})
scan(func(kv roachpb.KeyValue) (bool, error) {
postKeys[string(kv.Key)] = struct{}{}
return false, nil
})
// Compute the new keys.
for k := range preKeys {
delete(postKeys, k)
}
// Keep only the subsumed range's local keys.
localRangeKeyPrefix := string(keys.MakeRangeIDPrefix(bDesc.RangeID))
for k := range postKeys {
if !strings.HasPrefix(k, localRangeKeyPrefix) {
delete(postKeys, k)
}
}
if numKeys := len(postKeys); numKeys > 0 {
var buf bytes.Buffer
fmt.Fprintf(&buf, "%d keys were not cleaned up:\n", numKeys)
for k := range postKeys {
fmt.Fprintf(&buf, "%q\n", k)
}
t.Fatal(buf.String())
}
}
Example 11: TestRangeTransferLease
func TestRangeTransferLease(t *testing.T) {
defer leaktest.AfterTest(t)()
cfg := storage.TestStoreConfig(nil)
var filterMu syncutil.Mutex
var filter func(filterArgs storagebase.FilterArgs) *roachpb.Error
cfg.TestingKnobs.TestingCommandFilter =
func(filterArgs storagebase.FilterArgs) *roachpb.Error {
filterMu.Lock()
filterCopy := filter
filterMu.Unlock()
if filterCopy != nil {
return filterCopy(filterArgs)
}
return nil
}
var waitForTransferBlocked atomic.Value
waitForTransferBlocked.Store(false)
transferBlocked := make(chan struct{})
cfg.TestingKnobs.LeaseTransferBlockedOnExtensionEvent = func(
_ roachpb.ReplicaDescriptor) {
if waitForTransferBlocked.Load().(bool) {
transferBlocked <- struct{}{}
waitForTransferBlocked.Store(false)
}
}
mtc := &multiTestContext{}
mtc.storeConfig = &cfg
mtc.Start(t, 2)
defer mtc.Stop()
// First, do a write; we'll use it to determine when the dust has settled.
leftKey := roachpb.Key("a")
incArgs := incrementArgs(leftKey, 1)
if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], &incArgs); pErr != nil {
t.Fatal(pErr)
}
// Get the left range's ID.
rangeID := mtc.stores[0].LookupReplica(roachpb.RKey("a"), nil).RangeID
// Replicate the left range onto node 1.
mtc.replicateRange(rangeID, 1)
replica0 := mtc.stores[0].LookupReplica(roachpb.RKey("a"), nil)
replica1 := mtc.stores[1].LookupReplica(roachpb.RKey("a"), nil)
gArgs := getArgs(leftKey)
replica0Desc, err := replica0.GetReplicaDescriptor()
if err != nil {
t.Fatal(err)
}
// Check that replica0 can serve reads OK.
if _, pErr := client.SendWrappedWith(
context.Background(),
mtc.senders[0],
roachpb.Header{Replica: replica0Desc},
&gArgs,
); pErr != nil {
t.Fatal(pErr)
}
{
// Transferring the lease to ourself should be a no-op.
origLeasePtr, _ := replica0.GetLease()
origLease := *origLeasePtr
if err := replica0.AdminTransferLease(replica0Desc.StoreID); err != nil {
t.Fatal(err)
}
newLeasePtr, _ := replica0.GetLease()
if origLeasePtr != newLeasePtr || origLease != *newLeasePtr {
t.Fatalf("expected %+v, but found %+v", origLeasePtr, newLeasePtr)
}
}
{
// An invalid target should result in an error.
const expected = "unable to find store .* in range"
if err := replica0.AdminTransferLease(1000); !testutils.IsError(err, expected) {
t.Fatalf("expected %s, but found %v", expected, err)
}
}
// Move the lease to store 1.
var newHolderDesc roachpb.ReplicaDescriptor
util.SucceedsSoon(t, func() error {
var err error
newHolderDesc, err = replica1.GetReplicaDescriptor()
return err
})
if err := replica0.AdminTransferLease(newHolderDesc.StoreID); err != nil {
t.Fatal(err)
}
// Check that replica0 doesn't serve reads any more.
replica0Desc, err = replica0.GetReplicaDescriptor()
if err != nil {
t.Fatal(err)
}
_, pErr := client.SendWrappedWith(
context.Background(),
//......... the rest of this example is omitted here .........
Example 12: TestStoreRangeLeaseSwitcheroo
// TestStoreRangeLeaseSwitcheroo verifies that ranges can be switched
// between expiration and epoch and back.
func TestStoreRangeLeaseSwitcheroo(t *testing.T) {
defer leaktest.AfterTest(t)()
sc := storage.TestStoreConfig(nil)
sc.EnableEpochRangeLeases = true
mtc := &multiTestContext{storeConfig: &sc}
defer mtc.Stop()
mtc.Start(t, 1)
splitKey := roachpb.Key("a")
splitArgs := adminSplitArgs(splitKey, splitKey)
if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], splitArgs); pErr != nil {
t.Fatal(pErr)
}
// Allow leases to expire and send commands to ensure we
// re-acquire, then check types again.
mtc.expireLeases()
if _, err := mtc.dbs[0].Inc(context.TODO(), splitKey, 1); err != nil {
t.Fatalf("failed to increment: %s", err)
}
// We started with epoch ranges enabled, so verify we have an epoch lease.
repl := mtc.stores[0].LookupReplica(roachpb.RKey(splitKey), nil)
lease, _ := repl.GetLease()
if lt := lease.Type(); lt != roachpb.LeaseEpoch {
t.Fatalf("expected lease type epoch; got %d", lt)
}
// Stop the store and reverse the epoch range lease setting.
mtc.stopStore(0)
sc.EnableEpochRangeLeases = false
mtc.restartStore(0)
mtc.expireLeases()
if _, err := mtc.dbs[0].Inc(context.TODO(), splitKey, 1); err != nil {
t.Fatalf("failed to increment: %s", err)
}
// Verify we end up with an expiration lease on restart.
repl = mtc.stores[0].LookupReplica(roachpb.RKey(splitKey), nil)
lease, _ = repl.GetLease()
if lt := lease.Type(); lt != roachpb.LeaseExpiration {
t.Fatalf("expected lease type expiration; got %d", lt)
}
// Now, one more time, switch back to epoch-based.
mtc.stopStore(0)
sc.EnableEpochRangeLeases = true
mtc.restartStore(0)
mtc.expireLeases()
if _, err := mtc.dbs[0].Inc(context.TODO(), splitKey, 1); err != nil {
t.Fatalf("failed to increment: %s", err)
}
// Verify we end up with an epoch lease on restart.
repl = mtc.stores[0].LookupReplica(roachpb.RKey(splitKey), nil)
lease, _ = repl.GetLease()
if lt := lease.Type(); lt != roachpb.LeaseEpoch {
t.Fatalf("expected lease type epoch; got %d", lt)
}
}
Example 13: TestStoreRangeLease
// TestStoreRangeLease verifies that ranges after range 0 get
// epoch-based range leases if enabled and expiration-based
// otherwise.
func TestStoreRangeLease(t *testing.T) {
defer leaktest.AfterTest(t)()
for _, enableEpoch := range []bool{true, false} {
t.Run(fmt.Sprintf("epoch-based leases? %t", enableEpoch), func(t *testing.T) {
sc := storage.TestStoreConfig(nil)
sc.EnableEpochRangeLeases = enableEpoch
mtc := &multiTestContext{storeConfig: &sc}
defer mtc.Stop()
mtc.Start(t, 1)
splitKeys := []roachpb.Key{roachpb.Key("a"), roachpb.Key("b"), roachpb.Key("c")}
for _, splitKey := range splitKeys {
splitArgs := adminSplitArgs(splitKey, splitKey)
if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], splitArgs); pErr != nil {
t.Fatal(pErr)
}
}
rLeft := mtc.stores[0].LookupReplica(roachpb.RKeyMin, nil)
lease, _ := rLeft.GetLease()
if lt := lease.Type(); lt != roachpb.LeaseExpiration {
t.Fatalf("expected lease type expiration; got %d", lt)
}
// After the split, expect an expiration lease for other ranges.
for _, key := range splitKeys {
repl := mtc.stores[0].LookupReplica(roachpb.RKey(key), nil)
lease, _ = repl.GetLease()
if lt := lease.Type(); lt != roachpb.LeaseExpiration {
t.Fatalf("%s: expected lease type epoch; got %d", key, lt)
}
}
// Allow leases to expire and send commands to ensure we
// re-acquire, then check types again.
mtc.expireLeases()
for _, key := range splitKeys {
if _, err := mtc.dbs[0].Inc(context.TODO(), key, 1); err != nil {
t.Fatalf("%s failed to increment: %s", key, err)
}
}
// After the expiration, expect an epoch lease for the RHS if
// we've enabled epoch based range leases.
for _, key := range splitKeys {
repl := mtc.stores[0].LookupReplica(roachpb.RKey(key), nil)
lease, _ = repl.GetLease()
if enableEpoch {
if lt := lease.Type(); lt != roachpb.LeaseEpoch {
t.Fatalf("expected lease type epoch; got %d", lt)
}
} else {
if lt := lease.Type(); lt != roachpb.LeaseExpiration {
t.Fatalf("expected lease type expiration; got %d", lt)
}
}
}
})
}
}
Example 14: TestStoreRangeMergeWithData
// TestStoreRangeMergeWithData attempts to merge two collocated ranges,
// each containing data.
func TestStoreRangeMergeWithData(t *testing.T) {
defer leaktest.AfterTest(t)()
storeCfg := storage.TestStoreConfig(nil)
storeCfg.TestingKnobs.DisableSplitQueue = true
store, stopper := createTestStoreWithConfig(t, storeCfg)
defer stopper.Stop()
content := roachpb.Key("testing!")
aDesc, bDesc, err := createSplitRanges(store)
if err != nil {
t.Fatal(err)
}
// Write some values left and right of the proposed split key.
pArgs := putArgs([]byte("aaa"), content)
if _, err := client.SendWrapped(context.Background(), rg1(store), &pArgs); err != nil {
t.Fatal(err)
}
pArgs = putArgs([]byte("ccc"), content)
if _, err := client.SendWrappedWith(context.Background(), rg1(store), roachpb.Header{
RangeID: bDesc.RangeID,
}, &pArgs); err != nil {
t.Fatal(err)
}
// Confirm the values are there.
gArgs := getArgs([]byte("aaa"))
if reply, err := client.SendWrapped(context.Background(), rg1(store), &gArgs); err != nil {
t.Fatal(err)
} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(replyBytes, content) {
t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
}
gArgs = getArgs([]byte("ccc"))
if reply, err := client.SendWrappedWith(context.Background(), rg1(store), roachpb.Header{
RangeID: bDesc.RangeID,
}, &gArgs); err != nil {
t.Fatal(err)
} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(replyBytes, content) {
t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
}
// Merge the b range back into the a range.
args := adminMergeArgs(roachpb.KeyMin)
if _, err := client.SendWrapped(context.Background(), rg1(store), &args); err != nil {
t.Fatal(err)
}
// Verify no intents remain on range descriptor keys.
for _, key := range []roachpb.Key{keys.RangeDescriptorKey(aDesc.StartKey), keys.RangeDescriptorKey(bDesc.StartKey)} {
if _, _, err := engine.MVCCGet(context.Background(), store.Engine(), key, store.Clock().Now(), true, nil); err != nil {
t.Fatal(err)
}
}
// Verify the merge by looking up keys from both ranges.
rangeA := store.LookupReplica([]byte("a"), nil)
rangeB := store.LookupReplica([]byte("c"), nil)
rangeADesc := rangeA.Desc()
rangeBDesc := rangeB.Desc()
if !reflect.DeepEqual(rangeA, rangeB) {
t.Fatalf("ranges were not merged %+v=%+v", rangeADesc, rangeBDesc)
}
if !bytes.Equal(rangeADesc.StartKey, roachpb.RKeyMin) {
t.Fatalf("The start key is not equal to KeyMin %q=%q", rangeADesc.StartKey, roachpb.RKeyMin)
}
if !bytes.Equal(rangeADesc.EndKey, roachpb.RKeyMax) {
t.Fatalf("The end key is not equal to KeyMax %q=%q", rangeADesc.EndKey, roachpb.RKeyMax)
}
// Try to get values from after the merge.
gArgs = getArgs([]byte("aaa"))
if reply, err := client.SendWrapped(context.Background(), rg1(store), &gArgs); err != nil {
t.Fatal(err)
} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(replyBytes, content) {
t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
}
gArgs = getArgs([]byte("ccc"))
if reply, err := client.SendWrappedWith(context.Background(), rg1(store), roachpb.Header{
RangeID: rangeB.RangeID,
}, &gArgs); err != nil {
t.Fatal(err)
} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(replyBytes, content) {
t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
}
// Put new values after the merge on both sides.
pArgs = putArgs([]byte("aaaa"), content)
if _, err := client.SendWrapped(context.Background(), rg1(store), &pArgs); err != nil {
//......... the rest of this example is omitted here .........
Example 15: TestTimeSeriesMaintenanceQueue
// TestTimeSeriesMaintenanceQueue verifies that the shouldQueue and process
// methods pass the correct data to the store's TimeSeriesDataStore.
func TestTimeSeriesMaintenanceQueue(t *testing.T) {
defer leaktest.AfterTest(t)()
model := &modelTimeSeriesDataStore{
t: t,
pruneSeenStartKeys: make(map[string]struct{}),
pruneSeenEndKeys: make(map[string]struct{}),
}
manual := hlc.NewManualClock(1)
cfg := storage.TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond))
cfg.TimeSeriesDataStore = model
cfg.TestingKnobs.DisableScanner = true
cfg.TestingKnobs.DisableSplitQueue = true
stopper := stop.NewStopper()
defer stopper.Stop()
store := createTestStoreWithConfig(t, stopper, cfg)
// Generate several splits.
splitKeys := []roachpb.Key{roachpb.Key("c"), roachpb.Key("b"), roachpb.Key("a")}
for _, k := range splitKeys {
repl := store.LookupReplica(roachpb.RKey(k), nil)
args := adminSplitArgs(k, k)
if _, pErr := client.SendWrappedWith(context.Background(), store, roachpb.Header{
RangeID: repl.RangeID,
}, args); pErr != nil {
t.Fatal(pErr)
}
}
// Generate a list of start/end keys the model should have been passed by
// the queue. This consists of all split keys, with KeyMin as an additional
// start and KeyMax as an additional end.
expectedStartKeys := make(map[string]struct{})
expectedEndKeys := make(map[string]struct{})
expectedStartKeys[roachpb.KeyMin.String()] = struct{}{}
expectedEndKeys[roachpb.KeyMax.String()] = struct{}{}
for _, expected := range splitKeys {
expectedStartKeys[expected.String()] = struct{}{}
expectedEndKeys[expected.String()] = struct{}{}
}
// Wait for splits to complete and system config to be available.
util.SucceedsSoon(t, func() error {
if a, e := store.ReplicaCount(), len(expectedEndKeys); a != e {
return fmt.Errorf("expected %d replicas in store; found %d", a, e)
}
if _, ok := store.Gossip().GetSystemConfig(); !ok {
return fmt.Errorf("system config not yet available")
}
return nil
})
// Force replica scan to run, which will populate the model.
now := store.Clock().Now()
store.ForceTimeSeriesMaintenanceQueueProcess()
// Wait for processing to complete.
util.SucceedsSoon(t, func() error {
model.Lock()
defer model.Unlock()
if a, e := model.containsCalled, len(expectedStartKeys); a != e {
return fmt.Errorf("ContainsTimeSeries called %d times; expected %d", a, e)
}
if a, e := model.pruneCalled, len(expectedStartKeys); a != e {
return fmt.Errorf("PruneTimeSeries called %d times; expected %d", a, e)
}
return nil
})
model.Lock()
if a, e := model.pruneSeenStartKeys, expectedStartKeys; !reflect.DeepEqual(a, e) {
t.Errorf("start keys seen by PruneTimeSeries did not match expectation: %s", pretty.Diff(a, e))
}
if a, e := model.pruneSeenEndKeys, expectedEndKeys; !reflect.DeepEqual(a, e) {
t.Errorf("end keys seen by PruneTimeSeries did not match expectation: %s", pretty.Diff(a, e))
}
model.Unlock()
util.SucceedsSoon(t, func() error {
keys := []roachpb.RKey{roachpb.RKeyMin}
for _, k := range splitKeys {
keys = append(keys, roachpb.RKey(k))
}
for _, key := range keys {
repl := store.LookupReplica(key, nil)
ts, err := repl.GetQueueLastProcessed(context.TODO(), "timeSeriesMaintenance")
if err != nil {
return err
}
if ts.Less(now) {
return errors.Errorf("expected last processed %s > %s", ts, now)
}
}
return nil
})
//......... the rest of this example is omitted here .........