

Golang client.SendWrapped Function Code Examples

This article compiles typical usage examples of the SendWrapped function from the Go package github.com/cockroachdb/cockroach/pkg/internal/client. If you have been wondering what SendWrapped does, how to call it, or what real-world uses look like, the curated code examples below should help.


The following presents 15 code examples of the SendWrapped function, sorted by popularity by default.
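
Before the individual examples, here is a minimal sketch of the call pattern they all share: construct a roachpb request, hand it to client.SendWrapped together with a context and a client.Sender, check the returned *roachpb.Error, and type-assert the response to its concrete type. This sketch is not taken from the CockroachDB sources; the helper name sendGet is hypothetical, and the signatures match the 2016-era tree the examples below were extracted from (newer releases have since reorganized these packages).

package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/internal/client"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

// sendGet issues a single Get through client.SendWrapped against an
// arbitrary client.Sender (for example a DistSender, or a store's test
// sender as in the examples below) and returns the raw value bytes.
// Illustrative sketch only; not code from the CockroachDB repository.
func sendGet(ctx context.Context, sender client.Sender, key roachpb.Key) ([]byte, error) {
	get := &roachpb.GetRequest{
		Span: roachpb.Span{Key: key},
	}
	resp, pErr := client.SendWrapped(ctx, sender, get)
	if pErr != nil {
		// SendWrapped reports failures as *roachpb.Error; convert to a plain error.
		return nil, pErr.GoError()
	}
	// The generic roachpb.Response must be asserted to its concrete type.
	gr := resp.(*roachpb.GetResponse)
	if gr.Value == nil {
		return nil, nil // key not present
	}
	return gr.Value.GetBytes()
}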

Example 1: TestLeaseNotUsedAfterRestart

// Test that leases held before a restart are not used after the restart.
// See replica.mu.minLeaseProposedTS for the reasons why this isn't allowed.
func TestLeaseNotUsedAfterRestart(t *testing.T) {
	defer leaktest.AfterTest(t)()
	sc := storage.TestStoreConfig(nil)
	var leaseAcquisitionTrap atomic.Value
	// Disable the split queue so that no ranges are split. This makes it easy
	// below to trap any lease request and infer that it refers to the range we're
	// interested in.
	sc.TestingKnobs.DisableSplitQueue = true
	sc.TestingKnobs.LeaseRequestEvent = func(ts hlc.Timestamp) {
		val := leaseAcquisitionTrap.Load()
		if val == nil {
			return
		}
		trapCallback := val.(func(ts hlc.Timestamp))
		if trapCallback != nil {
			trapCallback(ts)
		}
	}
	mtc := &multiTestContext{storeConfig: &sc}
	mtc.Start(t, 1)
	defer mtc.Stop()

	// Send a read, to acquire a lease.
	getArgs := getArgs([]byte("a"))
	if _, err := client.SendWrapped(context.Background(), rg1(mtc.stores[0]), &getArgs); err != nil {
		t.Fatal(err)
	}

	// Restart the mtc. Before we do that, we're installing a callback used to
	// assert that a new lease has been requested. The callback is installed
	// before the restart, as the lease might be requested at any time and for
	// many reasons by background processes, even before we send the read below.
	leaseAcquisitionCh := make(chan error)
	var once sync.Once
	leaseAcquisitionTrap.Store(func(_ hlc.Timestamp) {
		once.Do(func() {
			close(leaseAcquisitionCh)
		})
	})
	mtc.restart()

	// Send another read and check that the pre-existing lease has not been used.
	// Concretely, we check that a new lease is requested.
	if _, err := client.SendWrapped(context.Background(), rg1(mtc.stores[0]), &getArgs); err != nil {
		t.Fatal(err)
	}
	// Check that the Send above triggered a lease acquisition.
	select {
	case <-leaseAcquisitionCh:
	case <-time.After(time.Second):
		t.Fatalf("read did not acquire a new lease")
	}
}
Author: bdarnell, Project: cockroach, Lines: 55, Source: client_replica_test.go

Example 2: TestStoreRangeMergeStats

// TestStoreRangeMergeStats starts by splitting a range, then writing random data
// to both sides of the split. It then merges the ranges and verifies the merged
// range has stats consistent with recomputations.
func TestStoreRangeMergeStats(t *testing.T) {
	defer leaktest.AfterTest(t)()
	manual := hlc.NewManualClock(123)
	storeCfg := storage.TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond))
	storeCfg.TestingKnobs.DisableSplitQueue = true
	store, stopper := createTestStoreWithConfig(t, storeCfg)
	defer stopper.Stop()

	// Split the range.
	aDesc, bDesc, pErr := createSplitRanges(store)
	if pErr != nil {
		t.Fatal(pErr)
	}

	// Write some values left and right of the proposed split key.
	writeRandomDataToRange(t, store, aDesc.RangeID, []byte("aaa"))
	writeRandomDataToRange(t, store, bDesc.RangeID, []byte("ccc"))

	// Get the range stats for both ranges now that we have data.
	snap := store.Engine().NewSnapshot()
	defer snap.Close()
	msA, err := engine.MVCCGetRangeStats(context.Background(), snap, aDesc.RangeID)
	if err != nil {
		t.Fatal(err)
	}
	msB, err := engine.MVCCGetRangeStats(context.Background(), snap, bDesc.RangeID)
	if err != nil {
		t.Fatal(err)
	}

	// Stats should agree with recomputation.
	if err := verifyRecomputedStats(snap, aDesc, msA, manual.UnixNano()); err != nil {
		t.Fatalf("failed to verify range A's stats before split: %v", err)
	}
	if err := verifyRecomputedStats(snap, bDesc, msB, manual.UnixNano()); err != nil {
		t.Fatalf("failed to verify range B's stats before split: %v", err)
	}

	manual.Increment(100)

	// Merge the b range back into the a range.
	args := adminMergeArgs(roachpb.KeyMin)
	if _, err := client.SendWrapped(context.Background(), rg1(store), &args); err != nil {
		t.Fatal(err)
	}
	replMerged := store.LookupReplica(aDesc.StartKey, nil)

	// Get the range stats for the merged range and verify.
	snap = store.Engine().NewSnapshot()
	defer snap.Close()
	msMerged, err := engine.MVCCGetRangeStats(context.Background(), snap, replMerged.RangeID)
	if err != nil {
		t.Fatal(err)
	}

	// Merged stats should agree with recomputation.
	if err := verifyRecomputedStats(snap, replMerged.Desc(), msMerged, manual.UnixNano()); err != nil {
		t.Errorf("failed to verify range's stats after merge: %v", err)
	}
}
Author: veteranlu, Project: cockroach, Lines: 63, Source: client_merge_test.go

Example 3: TestStoreRangeMergeTwoEmptyRanges

// TestStoreRangeMergeTwoEmptyRanges tries to merge two empty ranges together.
func TestStoreRangeMergeTwoEmptyRanges(t *testing.T) {
	defer leaktest.AfterTest(t)()
	storeCfg := storage.TestStoreConfig(nil)
	storeCfg.TestingKnobs.DisableSplitQueue = true
	store, stopper := createTestStoreWithConfig(t, storeCfg)
	defer stopper.Stop()

	if _, _, err := createSplitRanges(store); err != nil {
		t.Fatal(err)
	}

	// Merge the b range back into the a range.
	args := adminMergeArgs(roachpb.KeyMin)
	_, err := client.SendWrapped(context.Background(), rg1(store), &args)
	if err != nil {
		t.Fatal(err)
	}

	// Verify the merge by looking up keys from both ranges.
	replicaA := store.LookupReplica([]byte("a"), nil)
	replicaB := store.LookupReplica([]byte("c"), nil)

	if !reflect.DeepEqual(replicaA, replicaB) {
		t.Fatalf("ranges were not merged %s!=%s", replicaA, replicaB)
	}
}
Author: veteranlu, Project: cockroach, Lines: 27, Source: client_merge_test.go

Example 4: TestStoreRangeMergeNonCollocated

// TestStoreRangeMergeNonCollocated attempts to merge two ranges
// that are not on the same stores.
func TestStoreRangeMergeNonCollocated(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 4)
	defer mtc.Stop()

	store := mtc.stores[0]

	// Split into 3 ranges
	argsSplit := adminSplitArgs(roachpb.KeyMin, []byte("d"))
	if _, pErr := client.SendWrapped(context.Background(), rg1(store), &argsSplit); pErr != nil {
		t.Fatalf("Can't split range %s", pErr)
	}
	argsSplit = adminSplitArgs(roachpb.KeyMin, []byte("b"))
	if _, pErr := client.SendWrapped(context.Background(), rg1(store), &argsSplit); pErr != nil {
		t.Fatalf("Can't split range %s", pErr)
	}

	rangeA := store.LookupReplica([]byte("a"), nil)
	rangeADesc := rangeA.Desc()
	rangeB := store.LookupReplica([]byte("c"), nil)
	rangeBDesc := rangeB.Desc()
	rangeC := store.LookupReplica([]byte("e"), nil)
	rangeCDesc := rangeC.Desc()

	if bytes.Equal(rangeADesc.StartKey, rangeBDesc.StartKey) {
		log.Errorf(context.TODO(), "split ranges keys are equal %q!=%q", rangeADesc.StartKey, rangeBDesc.StartKey)
	}
	if bytes.Equal(rangeBDesc.StartKey, rangeCDesc.StartKey) {
		log.Errorf(context.TODO(), "split ranges keys are equal %q!=%q", rangeBDesc.StartKey, rangeCDesc.StartKey)
	}
	if bytes.Equal(rangeADesc.StartKey, rangeCDesc.StartKey) {
		log.Errorf(context.TODO(), "split ranges keys are equal %q!=%q", rangeADesc.StartKey, rangeCDesc.StartKey)
	}

	// Replicate the ranges to different sets of stores. Ranges A and C
	// are collocated, but B is different.
	mtc.replicateRange(rangeA.RangeID, 1, 2)
	mtc.replicateRange(rangeB.RangeID, 1, 3)
	mtc.replicateRange(rangeC.RangeID, 1, 2)

	// Attempt to merge.
	rangeADesc = rangeA.Desc()
	argsMerge := adminMergeArgs(roachpb.Key(rangeADesc.StartKey))
	if _, pErr := rangeA.AdminMerge(context.Background(), argsMerge, rangeADesc); !testutils.IsPError(pErr, "ranges not collocated") {
		t.Fatalf("did not got expected error; got %s", pErr)
	}
}
Author: knz, Project: cockroach, Lines: 49, Source: client_merge_test.go

Example 5: TestStoreRangeMergeLastRange

// TestStoreRangeMergeLastRange verifies that merging the last range
// fails.
func TestStoreRangeMergeLastRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	storeCfg := storage.TestStoreConfig(nil)
	storeCfg.TestingKnobs.DisableSplitQueue = true
	store, stopper := createTestStoreWithConfig(t, storeCfg)
	defer stopper.Stop()

	// Merge last range.
	args := adminMergeArgs(roachpb.KeyMin)
	if _, pErr := client.SendWrapped(context.Background(), rg1(store), &args); !testutils.IsPError(pErr, "cannot merge final range") {
		t.Fatalf("expected 'cannot merge final range' error; got %s", pErr)
	}
}
Author: veteranlu, Project: cockroach, Lines: 15, Source: client_merge_test.go

Example 6: BenchmarkStoreRangeMerge

func BenchmarkStoreRangeMerge(b *testing.B) {
	defer tracing.Disable()()
	storeCfg := storage.TestStoreConfig(nil)
	storeCfg.TestingKnobs.DisableSplitQueue = true
	stopper := stop.NewStopper()
	defer stopper.Stop()
	store := createTestStoreWithConfig(b, stopper, storeCfg)

	// Perform initial split of ranges.
	sArgs := adminSplitArgs(roachpb.KeyMin, []byte("b"))
	if _, err := client.SendWrapped(context.Background(), rg1(store), sArgs); err != nil {
		b.Fatal(err)
	}

	// Write some values left and right of the proposed split key.
	aDesc := store.LookupReplica([]byte("a"), nil).Desc()
	bDesc := store.LookupReplica([]byte("c"), nil).Desc()
	writeRandomDataToRange(b, store, aDesc.RangeID, []byte("aaa"))
	writeRandomDataToRange(b, store, bDesc.RangeID, []byte("ccc"))

	// Create args to merge the b range back into the a range.
	mArgs := adminMergeArgs(roachpb.KeyMin)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Merge the ranges.
		b.StartTimer()
		if _, err := client.SendWrapped(context.Background(), rg1(store), mArgs); err != nil {
			b.Fatal(err)
		}

		// Split the range.
		b.StopTimer()
		if _, err := client.SendWrapped(context.Background(), rg1(store), sArgs); err != nil {
			b.Fatal(err)
		}
	}
}
Author: BramGruneir, Project: cockroach, Lines: 38, Source: client_merge_test.go

Example 7: lookupRange

func (c *Cluster) lookupRange(nodeIdx int, key roachpb.Key) (*roachpb.RangeDescriptor, error) {
	req := &roachpb.RangeLookupRequest{
		Span: roachpb.Span{
			Key: keys.RangeMetaKey(keys.MustAddr(key)),
		},
		MaxRanges: 1,
	}
	sender := c.Clients[nodeIdx].GetSender()
	resp, pErr := client.SendWrapped(context.Background(), sender, req)
	if pErr != nil {
		return nil, errors.Errorf("%s: lookup range: %s", key, pErr)
	}
	return &resp.(*roachpb.RangeLookupResponse).Ranges[0], nil
}
Author: knz, Project: cockroach, Lines: 14, Source: localcluster.go

Example 8: LookupRange

// LookupRange returns the descriptor of the range containing key.
func (ts *TestServer) LookupRange(key roachpb.Key) (roachpb.RangeDescriptor, error) {
	rangeLookupReq := roachpb.RangeLookupRequest{
		Span: roachpb.Span{
			Key: keys.RangeMetaKey(keys.MustAddr(key)),
		},
		MaxRanges: 1,
	}
	resp, pErr := client.SendWrapped(context.Background(), ts.DistSender(), &rangeLookupReq)
	if pErr != nil {
		return roachpb.RangeDescriptor{}, errors.Errorf(
			"%q: lookup range unexpected error: %s", key, pErr)
	}
	return resp.(*roachpb.RangeLookupResponse).Ranges[0], nil
}
Author: jmptrader, Project: cockroach, Lines: 15, Source: testserver.go

Example 9: SplitRange

// SplitRange splits the range containing splitKey.
// The right range created by the split starts at the split key and extends to the
// original range's end key.
// Returns the new descriptors of the left and right ranges.
//
// splitKey must correspond to a SQL table key (it must end with a family ID /
// col ID).
func (ts *TestServer) SplitRange(
	splitKey roachpb.Key,
) (roachpb.RangeDescriptor, roachpb.RangeDescriptor, error) {
	splitRKey, err := keys.Addr(splitKey)
	if err != nil {
		return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{}, err
	}
	origRangeDesc, err := ts.LookupRange(splitKey)
	if err != nil {
		return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{}, err
	}
	if origRangeDesc.StartKey.Equal(splitRKey) {
		return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{},
			errors.Errorf(
				"cannot split range %+v at start key %q", origRangeDesc, splitKey)
	}
	splitReq := roachpb.AdminSplitRequest{
		Span: roachpb.Span{
			Key: splitKey,
		},
		SplitKey: splitKey,
	}
	_, pErr := client.SendWrapped(context.Background(), ts.DistSender(), &splitReq)
	if pErr != nil {
		return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{},
			errors.Errorf(
				"%q: split unexpected error: %s", splitReq.SplitKey, pErr)
	}

	var leftRangeDesc, rightRangeDesc roachpb.RangeDescriptor
	if err := ts.DB().GetProto(context.TODO(),
		keys.RangeDescriptorKey(origRangeDesc.StartKey), &leftRangeDesc); err != nil {
		return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{},
			errors.Wrap(err, "could not look up left-hand side descriptor")
	}
	// The split point might not be exactly the one we requested (it can be
	// adjusted slightly so we don't split in the middle of SQL rows). Update it
	// to the real point.
	splitRKey = leftRangeDesc.EndKey
	if err := ts.DB().GetProto(context.TODO(),
		keys.RangeDescriptorKey(splitRKey), &rightRangeDesc); err != nil {
		return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{},
			errors.Wrap(err, "could not look up right-hand side descriptor")
	}
	return leftRangeDesc, rightRangeDesc, nil
}
Author: jmptrader, Project: cockroach, Lines: 53, Source: testserver.go

Example 10: TestProactiveRaftLogTruncate

// TestProactiveRaftLogTruncate verifies that we proactively truncate the raft
// log even when replica scanning is disabled.
func TestProactiveRaftLogTruncate(t *testing.T) {
	defer leaktest.AfterTest(t)()
	t.Skip("#9772")

	store, _, stopper := createTestStore(t)
	defer stopper.Stop()

	store.SetReplicaScannerActive(false)

	r, err := store.GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}

	r.mu.Lock()
	oldFirstIndex, err := r.FirstIndex()
	r.mu.Unlock()
	if err != nil {
		t.Fatal(err)
	}

	// Write a few keys to the range. While writing these keys, the raft log
	// should be proactively truncated even though replica scanning is disabled.
	for i := 0; i < 2*RaftLogQueueStaleThreshold; i++ {
		key := roachpb.Key(fmt.Sprintf("key%02d", i))
		args := putArgs(key, []byte(fmt.Sprintf("value%02d", i)))
		if _, err := client.SendWrapped(context.Background(), store.testSender(), &args); err != nil {
			t.Fatal(err)
		}
	}

	// Wait for any asynchronous tasks to finish.
	stopper.Quiesce()

	r.mu.Lock()
	newFirstIndex, err := r.FirstIndex()
	r.mu.Unlock()
	if err != nil {
		t.Fatal(err)
	}

	if newFirstIndex <= oldFirstIndex {
		t.Errorf("log was not correctly truncated, old first index:%d, current first index:%d",
			oldFirstIndex, newFirstIndex)
	}
}
Author: hvaara, Project: cockroach, Lines: 48, Source: raft_log_queue_test.go

Example 11: createSplitRanges

func createSplitRanges(
	store *storage.Store,
) (*roachpb.RangeDescriptor, *roachpb.RangeDescriptor, *roachpb.Error) {
	args := adminSplitArgs(roachpb.KeyMin, []byte("b"))
	if _, err := client.SendWrapped(context.Background(), rg1(store), &args); err != nil {
		return nil, nil, err
	}

	rangeADesc := store.LookupReplica([]byte("a"), nil).Desc()
	rangeBDesc := store.LookupReplica([]byte("c"), nil).Desc()

	if bytes.Equal(rangeADesc.StartKey, rangeBDesc.StartKey) {
		log.Errorf(context.TODO(), "split ranges keys are equal %q!=%q", rangeADesc.StartKey, rangeBDesc.StartKey)
	}

	return rangeADesc, rangeBDesc, nil
}
Author: veteranlu, Project: cockroach, Lines: 17, Source: client_merge_test.go

Example 12: TestServerStartClock

// TestServerStartClock tests that a server's clock is not pushed out of thin
// air. This used to happen - the simple act of starting was causing a server's
// clock to be pushed because we were introducing bogus future timestamps into
// our system.
func TestServerStartClock(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// Set a high max-offset so that, if the server's clock is pushed by
	// MaxOffset, we don't hide that under the latency of the Start operation
	// which would allow the physical clock to catch up to the pushed one.
	params := base.TestServerArgs{
		Knobs: base.TestingKnobs{
			Store: &storage.StoreTestingKnobs{
				MaxOffset: time.Second,
			},
		},
	}
	s, _, _ := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	// Run a command so that we are sure to touch the timestamp cache. This is
	// actually not needed because other commands run during server
	// initialization, but we cannot guarantee that's going to stay that way.
	get := &roachpb.GetRequest{
		Span: roachpb.Span{Key: roachpb.Key("a")},
	}
	if _, err := client.SendWrapped(
		context.Background(), s.KVClient().(*client.DB).GetSender(), get,
	); err != nil {
		t.Fatal(err)
	}

	now := s.Clock().Now()
	// We rely on s.Clock() having been initialized from hlc.UnixNano(), which is a
	// bit fragile.
	physicalNow := hlc.UnixNano()
	serverClockWasPushed := (now.Logical > 0) || (now.WallTime > physicalNow)
	if serverClockWasPushed {
		t.Fatalf("time: server %s vs actual %d", now, physicalNow)
	}
}
Author: BramGruneir, Project: cockroach, Lines: 41, Source: server_test.go

Example 13: TestStoreMetrics

func TestStoreMetrics(t *testing.T) {
	defer leaktest.AfterTest(t)()
	t.Skip("TODO(mrtracy): #9204")

	mtc := &multiTestContext{}
	defer mtc.Stop()
	mtc.Start(t, 3)

	// Flush RocksDB memtables, so that RocksDB begins using block-based tables.
	// This is useful, because most of the stats we track don't apply to
	// memtables.
	if err := mtc.stores[0].Engine().Flush(); err != nil {
		t.Fatal(err)
	}
	if err := mtc.stores[1].Engine().Flush(); err != nil {
		t.Fatal(err)
	}

	// Disable the raft log truncation which confuses this test.
	for _, s := range mtc.stores {
		s.SetRaftLogQueueActive(false)
	}

	// Perform a split, which has special metrics handling.
	splitArgs := adminSplitArgs(roachpb.KeyMin, roachpb.Key("m"))
	if _, err := client.SendWrapped(context.Background(), rg1(mtc.stores[0]), splitArgs); err != nil {
		t.Fatal(err)
	}

	// Verify range count is as expected
	checkCounter(t, mtc.stores[0].Metrics().ReplicaCount, 2)

	// Verify all stats on store0 after split.
	verifyStats(t, mtc, 0)

	// Replicate the "right" range to the other stores.
	replica := mtc.stores[0].LookupReplica(roachpb.RKey("z"), nil)
	mtc.replicateRange(replica.RangeID, 1, 2)

	// Verify stats on store1 after replication.
	verifyStats(t, mtc, 1)

	// Add some data to the "right" range.
	dataKey := []byte("z")
	if _, err := mtc.dbs[0].Inc(context.TODO(), dataKey, 5); err != nil {
		t.Fatal(err)
	}
	mtc.waitForValues(roachpb.Key("z"), []int64{5, 5, 5})

	// Verify all stats on store 0 and 1 after addition.
	verifyStats(t, mtc, 0, 1)

	// Create a transaction statement that fails, but will add an entry to the
	// sequence cache. Regression test for #4969.
	if err := mtc.dbs[0].Txn(context.TODO(), func(txn *client.Txn) error {
		b := txn.NewBatch()
		b.CPut(dataKey, 7, 6)
		return txn.Run(b)
	}); err == nil {
		t.Fatal("Expected transaction error, but none received")
	}

	// Verify stats after sequence cache addition.
	verifyStats(t, mtc, 0)
	checkCounter(t, mtc.stores[0].Metrics().ReplicaCount, 2)

	// Unreplicate range from the first store.
	mtc.unreplicateRange(replica.RangeID, 0)

	// Force GC Scan on store 0 in order to fully remove range.
	mtc.stores[1].ForceReplicaGCScanAndProcess()
	mtc.waitForValues(roachpb.Key("z"), []int64{0, 5, 5})

	// Verify range count is as expected.
	checkCounter(t, mtc.stores[0].Metrics().ReplicaCount, 1)
	checkCounter(t, mtc.stores[1].Metrics().ReplicaCount, 1)

	// Verify all stats on store0 and store1 after range is removed.
	verifyStats(t, mtc, 0, 1)

	verifyRocksDBStats(t, mtc.stores[0])
	verifyRocksDBStats(t, mtc.stores[1])
}
Author: jmptrader, Project: cockroach, Lines: 83, Source: client_metrics_test.go

Example 14: TestGetTruncatableIndexes

// TestGetTruncatableIndexes verifies that old raft log entries are correctly
// removed.
func TestGetTruncatableIndexes(t *testing.T) {
	defer leaktest.AfterTest(t)()
	store, _, stopper := createTestStore(t)
	defer stopper.Stop()
	store.SetRaftLogQueueActive(false)

	r, err := store.GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}

	getIndexes := func() (uint64, uint64, uint64, error) {
		r.mu.Lock()
		firstIndex, err := r.FirstIndex()
		r.mu.Unlock()
		if err != nil {
			return 0, 0, 0, err
		}
		truncatableIndexes, oldestIndex, err := getTruncatableIndexes(context.Background(), r)
		if err != nil {
			return 0, 0, 0, err
		}
		return firstIndex, truncatableIndexes, oldestIndex, nil
	}

	aFirst, aTruncatable, aOldest, err := getIndexes()
	if err != nil {
		t.Fatal(err)
	}
	if aFirst == 0 {
		t.Errorf("expected first index to be greater than 0, got %d", aFirst)
	}

	// Write a few keys to the range.
	for i := 0; i < RaftLogQueueStaleThreshold+1; i++ {
		key := roachpb.Key(fmt.Sprintf("key%02d", i))
		args := putArgs(key, []byte(fmt.Sprintf("value%02d", i)))
		if _, err := client.SendWrapped(context.Background(), store.testSender(), &args); err != nil {
			t.Fatal(err)
		}
	}

	bFirst, bTruncatable, bOldest, err := getIndexes()
	if err != nil {
		t.Fatal(err)
	}
	if aFirst != bFirst {
		t.Errorf("expected firstIndex to not change, instead it changed from %d -> %d", aFirst, bFirst)
	}
	if aTruncatable >= bTruncatable {
		t.Errorf("expected truncatableIndexes to increase, instead it changed from %d -> %d", aTruncatable, bTruncatable)
	}
	if aOldest >= bOldest {
		t.Errorf("expected oldestIndex to increase, instead it changed from %d -> %d", aOldest, bOldest)
	}

	// Enable the raft log scanner and force a truncation.
	store.SetRaftLogQueueActive(true)
	store.ForceRaftLogScanAndProcess()
	store.SetRaftLogQueueActive(false)

	// There can be a delay from when the truncation command is issued and the
	// indexes updating.
	var cFirst, cTruncatable, cOldest uint64
	util.SucceedsSoon(t, func() error {
		var err error
		cFirst, cTruncatable, cOldest, err = getIndexes()
		if err != nil {
			t.Fatal(err)
		}
		if bFirst == cFirst {
			return errors.Errorf("truncation did not occur, expected firstIndex to change, instead it remained at %d", cFirst)
		}
		return nil
	})
	if bTruncatable < cTruncatable {
		t.Errorf("expected truncatableIndexes to decrease, instead it changed from %d -> %d", bTruncatable, cTruncatable)
	}
	if bOldest >= cOldest {
		t.Errorf("expected oldestIndex to increase, instead it changed from %d -> %d", bOldest, cOldest)
	}

	// Again, enable the raft log scanner and force a truncation. This time
	// we expect no truncation to occur.
	store.SetRaftLogQueueActive(true)
	store.ForceRaftLogScanAndProcess()
	store.SetRaftLogQueueActive(false)

	// Unlike the last iteration, where we expect a truncation and can wait on
	// it with succeedsSoon, we can't do that here. This check is fragile in
	// that the truncation triggered here may lose the race against the call to
	// GetFirstIndex or getTruncatableIndexes, giving a false negative. Fixing
	// this requires additional instrumentation of the queues, which was deemed
	// to require too much work at the time of this writing.
	dFirst, dTruncatable, dOldest, err := getIndexes()
	if err != nil {
		t.Fatal(err)
	}
//......... remainder of this example omitted .........
Author: hvaara, Project: cockroach, Lines: 101, Source: raft_log_queue_test.go

Example 15: TestStoreRangeMergeMetadataCleanup

// TestStoreRangeMergeMetadataCleanup tests that all metadata of a
// subsumed range is cleaned up on merge.
func TestStoreRangeMergeMetadataCleanup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	storeCfg := storage.TestStoreConfig(nil)
	storeCfg.TestingKnobs.DisableSplitQueue = true
	store, stopper := createTestStoreWithConfig(t, storeCfg)
	defer stopper.Stop()

	scan := func(f func(roachpb.KeyValue) (bool, error)) {
		if _, err := engine.MVCCIterate(context.Background(), store.Engine(), roachpb.KeyMin, roachpb.KeyMax, hlc.ZeroTimestamp, true, nil, false, f); err != nil {
			t.Fatal(err)
		}
	}
	content := roachpb.Key("testing!")

	// Write some values left of the proposed split key.
	pArgs := putArgs([]byte("aaa"), content)
	if _, err := client.SendWrapped(context.Background(), rg1(store), &pArgs); err != nil {
		t.Fatal(err)
	}

	// Collect all the keys.
	preKeys := make(map[string]struct{})
	scan(func(kv roachpb.KeyValue) (bool, error) {
		preKeys[string(kv.Key)] = struct{}{}
		return false, nil
	})

	// Split the range.
	_, bDesc, err := createSplitRanges(store)
	if err != nil {
		t.Fatal(err)
	}

	// Write some values right of the split key.
	pArgs = putArgs([]byte("ccc"), content)
	if _, err := client.SendWrappedWith(context.Background(), rg1(store), roachpb.Header{
		RangeID: bDesc.RangeID,
	}, &pArgs); err != nil {
		t.Fatal(err)
	}

	// Merge the b range back into the a range.
	args := adminMergeArgs(roachpb.KeyMin)
	if _, err := client.SendWrapped(context.Background(), rg1(store), &args); err != nil {
		t.Fatal(err)
	}

	// Collect all the keys again.
	postKeys := make(map[string]struct{})
	scan(func(kv roachpb.KeyValue) (bool, error) {
		postKeys[string(kv.Key)] = struct{}{}
		return false, nil
	})

	// Compute the new keys.
	for k := range preKeys {
		delete(postKeys, k)
	}

	// Keep only the subsumed range's local keys.
	localRangeKeyPrefix := string(keys.MakeRangeIDPrefix(bDesc.RangeID))
	for k := range postKeys {
		if !strings.HasPrefix(k, localRangeKeyPrefix) {
			delete(postKeys, k)
		}
	}

	if numKeys := len(postKeys); numKeys > 0 {
		var buf bytes.Buffer
		fmt.Fprintf(&buf, "%d keys were not cleaned up:\n", numKeys)
		for k := range postKeys {
			fmt.Fprintf(&buf, "%q\n", k)
		}
		t.Fatal(buf.String())
	}
}
Author: veteranlu, Project: cockroach, Lines: 78, Source: client_merge_test.go


Note: The github.com/cockroachdb/cockroach/pkg/internal/client.SendWrapped examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright; consult each project's license before using or distributing the code, and do not republish without permission.