

Golang testcluster.StartTestCluster Function Code Examples

This article collects typical usage examples of the Golang function StartTestCluster from github.com/cockroachdb/cockroach/pkg/testutils/testcluster. If you have been wondering what exactly StartTestCluster does, how to call it, or what real-world uses of it look like, the hand-picked code examples below should help.


A total of 14 code examples of the StartTestCluster function are shown below, sorted by popularity by default.
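Before looking at the individual examples, here is a minimal sketch of the start/stop pattern that every snippet on this page shares: start an in-process multi-node cluster, run statements against one of its nodes, and release all resources through the cluster's stopper. The test name and package clause are illustrative, the import paths are inferred from the package path named above and the snippets below, and the argument-less Stopper().Stop() call matches the older API used throughout this page (newer versions of the API take a context); treat this as a sketch, not code from the CockroachDB repository.

package example_test

import (
	"testing"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
)

// TestStartTestClusterBasic starts a three-node in-process cluster, runs one
// statement against the first node, and shuts everything down via the stopper.
func TestStartTestClusterBasic(t *testing.T) {
	defer leaktest.AfterTest(t)()

	tc := testcluster.StartTestCluster(t, 3, base.TestClusterArgs{
		// Let the cluster replicate ranges on its own (the alternative,
		// base.ReplicationManual, leaves replication to the test).
		ReplicationMode: base.ReplicationAuto,
	})
	defer tc.Stopper().Stop()

	// tc.Conns[0] is a *sql.DB connected to the first node.
	if _, err := tc.Conns[0].Exec(`CREATE DATABASE test`); err != nil {
		t.Fatal(err)
	}
}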

Example 1: TestGossipHandlesReplacedNode

// TestGossipHandlesReplacedNode tests that we can shut down a node and
// replace it with a new node at the same address (simulating a node getting
// restarted after losing its data) without the cluster breaking.
func TestGossipHandlesReplacedNode(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()

	tc := testcluster.StartTestCluster(t, 3,
		base.TestClusterArgs{
			ReplicationMode: base.ReplicationAuto,
		})
	defer tc.Stopper().Stop()

	// Take down the first node of the cluster and replace it with a new one.
	// We replace the first node rather than the second or third to be adversarial
	// because it typically has the most leases on it.
	oldNodeIdx := 0
	newServerArgs := base.TestServerArgs{
		Addr:          tc.Servers[oldNodeIdx].ServingAddr(),
		PartOfCluster: true,
		JoinAddr:      tc.Servers[1].ServingAddr(),
	}
	tc.StopServer(oldNodeIdx)
	tc.AddServer(t, newServerArgs)
	tc.WaitForStores(t, tc.Server(1).Gossip())

	// Ensure that all servers still running are responsive. If the two remaining
	// original nodes don't refresh their connection to the address of the first
	// node, they can get stuck here.
	for i := 1; i < 4; i++ {
		kvClient := tc.Server(i).KVClient().(*client.DB)
		if err := kvClient.Put(ctx, fmt.Sprintf("%d", i), i); err != nil {
			t.Errorf("failed Put to node %d: %s", i, err)
		}
	}
}
Author: veteranlu, Project: cockroach, Lines: 36, Source: gossip_test.go

Example 2: TestSplitAtTableBoundary

func TestSplitAtTableBoundary(t *testing.T) {
	defer leaktest.AfterTest(t)()

	testClusterArgs := base.TestClusterArgs{
		ReplicationMode: base.ReplicationAuto,
	}
	tc := testcluster.StartTestCluster(t, 3, testClusterArgs)
	defer tc.Stopper().Stop()

	runner := sqlutils.MakeSQLRunner(t, tc.Conns[0])
	runner.Exec(`CREATE DATABASE test`)
	runner.Exec(`CREATE TABLE test.t (k SERIAL PRIMARY KEY, v INT)`)

	const tableIDQuery = `
SELECT tables.id FROM system.namespace tables
  JOIN system.namespace dbs ON dbs.id = tables.parentid
  WHERE dbs.name = $1 AND tables.name = $2
`
	var tableID uint32
	runner.QueryRow(tableIDQuery, "test", "t").Scan(&tableID)
	tableStartKey := keys.MakeTablePrefix(tableID)

	// Wait for new table to split.
	testutils.SucceedsSoon(t, func() error {
		desc, err := tc.LookupRange(keys.MakeRowSentinelKey(tableStartKey))
		if err != nil {
			t.Fatal(err)
		}
		if !desc.StartKey.Equal(tableStartKey) {
			log.Infof(context.TODO(), "waiting on split results")
			return errors.Errorf("expected range start key %s; got %s", tableStartKey, desc.StartKey)
		}
		return nil
	})
}
Author: BramGruneir, Project: cockroach, Lines: 35, Source: table_split_test.go

Example 3: backupRestoreTestSetup

func backupRestoreTestSetup(
	t testing.TB, numAccounts int,
) (
	ctx context.Context,
	tempDir string,
	tc *testcluster.TestCluster,
	kvDB *client.DB,
	sqlDB *sqlutils.SQLRunner,
	cleanup func(),
) {
	ctx = context.Background()

	dir, dirCleanupFn := testutils.TempDir(t, 1)

	// Use ReplicationManual so we can force full replication, which is needed
	// to later move the leases around.
	tc = testcluster.StartTestCluster(t, backupRestoreClusterSize, base.TestClusterArgs{
		ReplicationMode: base.ReplicationManual,
	})
	sqlDB = sqlutils.MakeSQLRunner(t, tc.Conns[0])
	kvDB = tc.Server(0).KVClient().(*client.DB)

	sqlDB.Exec(bankCreateDatabase)
	sqlDB.Exec(bankCreateTable)
	for _, insert := range bankDataInsertStmts(numAccounts) {
		sqlDB.Exec(insert)
	}
	for _, split := range bankSplitStmts(numAccounts, backupRestoreDefaultRanges) {
		sqlDB.Exec(split)
	}

	targets := make([]testcluster.ReplicationTarget, backupRestoreClusterSize-1)
	for i := 1; i < backupRestoreClusterSize; i++ {
		targets[i-1] = tc.Target(i)
	}
	txn := client.NewTxn(ctx, *kvDB)
	rangeDescs, err := sql.AllRangeDescriptors(txn)
	if err != nil {
		t.Fatal(err)
	}
	for _, r := range rangeDescs {
		if _, err := tc.AddReplicas(r.StartKey.AsRawKey(), targets...); err != nil {
			t.Fatal(err)
		}
	}

	cleanupFn := func() {
		tc.Stopper().Stop()
		dirCleanupFn()
	}

	return ctx, dir, tc, kvDB, sqlDB, cleanupFn
}
Author: veteranlu, Project: cockroach, Lines: 53, Source: backup_test.go

Example 4: TestParallelCreateTables

// TestParallelCreateTables tests that concurrent create table requests are
// correctly filled.
func TestParallelCreateTables(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// This number has to be around 10 or else testrace will take too long to
	// finish.
	const numberOfTables = 10
	const numberOfNodes = 3

	tc := testcluster.StartTestCluster(t, numberOfNodes, base.TestClusterArgs{})
	defer tc.Stopper().Stop()

	if _, err := tc.ServerConn(0).Exec(`CREATE DATABASE "test"`); err != nil {
		t.Fatal(err)
	}
	// Get the current value of the descriptor ID generator.
	kvDB := tc.Servers[0].KVClient().(*client.DB)
	var descIDStart int64
	if descID, err := kvDB.Get(context.Background(), keys.DescIDGenerator); err != nil {
		t.Fatal(err)
	} else {
		descIDStart = descID.ValueInt()
	}

	var wgStart sync.WaitGroup
	var wgEnd sync.WaitGroup
	wgStart.Add(numberOfTables)
	wgEnd.Add(numberOfTables)
	signal := make(chan struct{})
	completed := make(chan int, numberOfTables)
	for i := 0; i < numberOfTables; i++ {
		db := tc.ServerConn(i % numberOfNodes)
		go createTestTable(t, tc, i, db, &wgStart, &wgEnd, signal, completed)
	}

	// Wait until all goroutines are ready.
	wgStart.Wait()
	// Signal the create table goroutines to start.
	close(signal)
	// Wait until all create tables are finished.
	wgEnd.Wait()
	close(completed)

	verifyTables(
		t,
		tc,
		completed,
		numberOfTables,
		descIDStart,
	)
}
Author: knz, Project: cockroach, Lines: 52, Source: create_test.go
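Example 4 depends on two helpers, createTestTable and verifyTables, that are not reproduced on this page. Purely for orientation, the following is a hypothetical sketch of what a createTestTable-style worker could look like given the synchronization the test sets up (two WaitGroups, a shared start signal, and a buffered completed channel). The table name, SQL statement, and error handling are assumptions for illustration, not the actual CockroachDB implementation.

import (
	gosql "database/sql"
	"fmt"
	"sync"
	"testing"

	"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
)

// createTestTable is a hypothetical sketch of the worker used in Example 4,
// not the actual CockroachDB helper: it signals readiness, waits for the
// shared start signal, creates one table, and reports its index.
func createTestTable(
	t *testing.T,
	tc *testcluster.TestCluster, // unused here; kept only to mirror the call site
	id int,
	db *gosql.DB,
	wgStart *sync.WaitGroup,
	wgEnd *sync.WaitGroup,
	signal chan struct{},
	completed chan int,
) {
	defer wgEnd.Done()

	wgStart.Done() // tell the test this goroutine is ready
	<-signal       // block until the test releases all workers at once

	// Hypothetical table name: each worker creates its own table, so the
	// descriptor ID generator advances once per goroutine.
	stmt := fmt.Sprintf(`CREATE TABLE test.t%d (k INT PRIMARY KEY, v INT)`, id)
	if _, err := db.Exec(stmt); err != nil {
		t.Errorf("table %d: %s", id, err)
		return
	}
	completed <- id // the channel is buffered, so this never blocks
}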

Example 5: TestGossipHandlesReplacedNode

// TestGossipHandlesReplacedNode tests that we can shut down a node and
// replace it with a new node at the same address (simulating a node getting
// restarted after losing its data) without the cluster breaking.
func TestGossipHandlesReplacedNode(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()

	// Shorten the raft tick interval and election timeout to make range leases
	// much shorter than normal. This keeps us from having to wait so long for
	// the replaced node's leases to time out, but has still shown itself to be
	// long enough to avoid flakes.
	serverArgs := base.TestServerArgs{
		RaftTickInterval:         10 * time.Millisecond,
		RaftElectionTimeoutTicks: 10,
		RetryOptions: retry.Options{
			InitialBackoff: 10 * time.Millisecond,
			MaxBackoff:     50 * time.Millisecond,
		},
	}

	tc := testcluster.StartTestCluster(t, 3,
		base.TestClusterArgs{
			ReplicationMode: base.ReplicationAuto,
			ServerArgs:      serverArgs,
		})
	defer tc.Stopper().Stop()

	// Take down a node other than the first node and replace it with a new one.
	// Replacing the first node would be better from an adversarial testing
	// perspective because it typically has the most leases on it, but that also
	// causes the test to take significantly longer as a result.
	oldNodeIdx := 1
	newServerArgs := serverArgs
	newServerArgs.Addr = tc.Servers[oldNodeIdx].ServingAddr()
	newServerArgs.PartOfCluster = true
	newServerArgs.JoinAddr = tc.Servers[0].ServingAddr()
	tc.StopServer(oldNodeIdx)
	tc.AddServer(t, newServerArgs)
	tc.WaitForStores(t, tc.Server(1).Gossip())

	// Ensure that all servers still running are responsive. If the two remaining
	// original nodes don't refresh their connection to the replaced node's
	// address, they can get stuck here.
	for i, server := range tc.Servers {
		if i == oldNodeIdx {
			continue
		}
		kvClient := server.KVClient().(*client.DB)
		if err := kvClient.Put(ctx, fmt.Sprintf("%d", i), i); err != nil {
			t.Errorf("failed Put to node %d: %s", i, err)
		}
	}
}
Author: jmptrader, Project: cockroach, Lines: 53, Source: gossip_test.go

Example 6: benchmarkMultinodeCockroach

func benchmarkMultinodeCockroach(b *testing.B, f func(b *testing.B, db *gosql.DB)) {
	defer tracing.Disable()()
	tc := testcluster.StartTestCluster(b, 3,
		base.TestClusterArgs{
			ReplicationMode: base.ReplicationAuto,
			ServerArgs: base.TestServerArgs{
				UseDatabase: "bench",
			},
		})
	if _, err := tc.Conns[0].Exec(`CREATE DATABASE bench`); err != nil {
		b.Fatal(err)
	}
	defer tc.Stopper().Stop()

	f(b, tc.Conns[0])
}
Author: hvaara, Project: cockroach, Lines: 16, Source: bench_test.go

Example 7: TestBackupRestoreOnce

func TestBackupRestoreOnce(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// TODO(dan): Actually invalidate the descriptor cache and delete this line.
	defer sql.TestDisableTableLeases()()
	const numAccounts = 1000

	ctx, dir, tc, kvDB, _, cleanupFn := backupRestoreTestSetup(t, numAccounts)
	defer cleanupFn()

	{
		desc, err := sql.Backup(ctx, *kvDB, dir, tc.Server(0).Clock().Now())
		if err != nil {
			t.Fatal(err)
		}
		approxDataSize := int64(backupRestoreRowPayloadSize) * numAccounts
		if max := approxDataSize * 2; desc.DataSize < approxDataSize || desc.DataSize > max {
			t.Errorf("expected data size in [%d,%d] but was %d", approxDataSize, max, desc.DataSize)
		}
	}

	// Start a new cluster to restore into.
	{
		tcRestore := testcluster.StartTestCluster(t, backupRestoreClusterSize, base.TestClusterArgs{})
		defer tcRestore.Stopper().Stop()
		sqlDBRestore := sqlutils.MakeSQLRunner(t, tcRestore.Conns[0])
		kvDBRestore := tcRestore.Server(0).KVClient().(*client.DB)

		// Restore assumes the database exists.
		sqlDBRestore.Exec(bankCreateDatabase)

		table := parser.TableName{DatabaseName: "bench", TableName: "bank"}
		if _, err := sql.Restore(ctx, *kvDBRestore, dir, table); err != nil {
			t.Fatal(err)
		}

		var rowCount int
		sqlDBRestore.QueryRow(`SELECT COUNT(*) FROM bench.bank`).Scan(&rowCount)
		if rowCount != numAccounts {
			t.Fatalf("expected %d rows but found %d", numAccounts, rowCount)
		}
	}
}
Author: veteranlu, Project: cockroach, Lines: 42, Source: backup_test.go

Example 8: TestReplicateQueueRebalance

func TestReplicateQueueRebalance(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// Set the gossip stores interval lower to speed up rebalancing. With the
	// default of 5s we have to wait ~5s for the rebalancing to start.
	defer func(v time.Duration) {
		gossip.GossipStoresInterval = v
	}(gossip.GossipStoresInterval)
	gossip.GossipStoresInterval = 100 * time.Millisecond

	// TODO(peter): Remove when lease rebalancing is the default.
	defer func(v bool) {
		storage.EnableLeaseRebalancing = v
	}(storage.EnableLeaseRebalancing)
	storage.EnableLeaseRebalancing = true

	const numNodes = 5
	tc := testcluster.StartTestCluster(t, numNodes,
		base.TestClusterArgs{ReplicationMode: base.ReplicationAuto},
	)
	defer tc.Stopper().Stop()

	const newRanges = 5
	for i := 0; i < newRanges; i++ {
		tableID := keys.MaxReservedDescID + i + 1
		splitKey := keys.MakeRowSentinelKey(keys.MakeTablePrefix(uint32(tableID)))
		for {
			if _, _, err := tc.SplitRange(splitKey); err != nil {
				if testutils.IsError(err, "split at key .* failed: conflict updating range descriptors") ||
					testutils.IsError(err, "range is already split at key") {
					continue
				}
				t.Fatal(err)
			}
			break
		}
	}

	countReplicas := func() []int {
		counts := make([]int, len(tc.Servers))
		for _, s := range tc.Servers {
			err := s.Stores().VisitStores(func(s *storage.Store) error {
				counts[s.StoreID()-1] += s.ReplicaCount()
				return nil
			})
			if err != nil {
				t.Fatal(err)
			}
		}
		return counts
	}

	numRanges := newRanges + server.ExpectedInitialRangeCount()
	numReplicas := numRanges * 3
	const minThreshold = 0.9
	minReplicas := int(math.Floor(minThreshold * (float64(numReplicas) / numNodes)))

	util.SucceedsSoon(t, func() error {
		counts := countReplicas()
		for _, c := range counts {
			if c < minReplicas {
				err := errors.Errorf("not balanced: %d", counts)
				log.Info(context.Background(), err)
				return err
			}
		}
		return nil
	})
}
Author: hvaara, Project: cockroach, Lines: 69, Source: replicate_queue_test.go

Example 9: TestReplicateQueueDownReplicate

// TestReplicateQueueDownReplicate verifies that the replication queue will notice
// over-replicated ranges and remove replicas from them.
func TestReplicateQueueDownReplicate(t *testing.T) {
	defer leaktest.AfterTest(t)()

	const replicaCount = 3

	tc := testcluster.StartTestCluster(t, replicaCount+2,
		base.TestClusterArgs{ReplicationMode: base.ReplicationAuto},
	)
	defer tc.Stopper().Stop()

	// Split off a range from the initial range for testing; there are
	// complications if the metadata ranges are moved.
	testKey := roachpb.Key("m")
	if _, _, err := tc.SplitRange(testKey); err != nil {
		t.Fatal(err)
	}

	desc, err := tc.LookupRange(testKey)
	if err != nil {
		t.Fatal(err)
	}
	rangeID := desc.RangeID

	countReplicas := func() int {
		count := 0
		for _, s := range tc.Servers {
			if err := s.Stores().VisitStores(func(store *storage.Store) error {
				if _, err := store.GetReplica(rangeID); err == nil {
					count++
				}
				return nil
			}); err != nil {
				t.Fatal(err)
			}
		}
		return count
	}

	// Up-replicate the new range to all servers to create redundant replicas.
	// Add replicas to all of the nodes. Only 2 of these calls will succeed
	// because the range is already replicated to the other 3 nodes.
	util.SucceedsSoon(t, func() error {
		for i := 0; i < tc.NumServers(); i++ {
			_, err := tc.AddReplicas(testKey, tc.Target(i))
			if err != nil {
				if testutils.IsError(err, "unable to add replica .* which is already present") {
					continue
				}
				return err
			}
		}
		if c := countReplicas(); c != tc.NumServers() {
			return errors.Errorf("replica count = %d", c)
		}
		return nil
	})

	// Ensure that the replicas for the new range down replicate.
	util.SucceedsSoon(t, func() error {
		if c := countReplicas(); c != replicaCount {
			return errors.Errorf("replica count = %d", c)
		}
		return nil
	})
}
Author: veteranlu, Project: cockroach, Lines: 67, Source: replicate_queue_test.go

Example 10: TestSpanResolverUsesCaches

// Test that resolving spans uses a node's range cache and lease holder cache.
// The idea is to test that resolving is not random, but predictable given the
// state of caches.
func TestSpanResolverUsesCaches(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testcluster.StartTestCluster(t, 4,
		base.TestClusterArgs{
			ReplicationMode: base.ReplicationManual,
			ServerArgs: base.TestServerArgs{
				UseDatabase: "t",
			},
		})
	defer tc.Stopper().Stop()

	rowRanges, _ := setupRanges(
		tc.Conns[0], tc.Servers[0], tc.Servers[0].KVClient().(*client.DB), t)

	// Replicate the row ranges on all of the first 3 nodes. Save the 4th node in
	// a pristine state, with empty caches.
	for i := 0; i < 3; i++ {
		var err error
		rowRanges[i], err = tc.AddReplicas(
			rowRanges[i].StartKey.AsRawKey(), tc.Target(1), tc.Target(2))
		if err != nil {
			t.Fatal(err)
		}
	}

	// Scatter the leases around; node i gets range i.
	for i := 0; i < 3; i++ {
		if err := tc.TransferRangeLease(rowRanges[i], tc.Target(i)); err != nil {
			t.Fatal(err)
		}
		// Wait for everybody to apply the new lease, so that we can rely on the
		// lease discovery done later by the SpanResolver to be up to date.
		testutils.SucceedsSoon(t, func() error {
			for j := 0; j < 3; j++ {
				target := tc.Target(j)
				rt, err := tc.FindRangeLeaseHolder(rowRanges[i], &target)
				if err != nil {
					return err
				}
				if rt != tc.Target(i) {
					return errors.Errorf("node %d hasn't applied the lease yet", j)
				}
			}
			return nil
		})
	}

	// Create a SpanResolver using the 4th node, with empty caches.
	s3 := tc.Servers[3]

	lr := distsqlplan.NewSpanResolver(
		s3.DistSender(), s3.Gossip(), s3.GetNode().Descriptor,
		distsqlplan.BinPackingLeaseHolderChoice)

	var spans []spanWithDir
	for i := 0; i < 3; i++ {
		spans = append(
			spans,
			spanWithDir{
				Span: roachpb.Span{
					Key:    rowRanges[i].StartKey.AsRawKey(),
					EndKey: rowRanges[i].EndKey.AsRawKey(),
				},
				dir: kv.Ascending,
			})
	}

	// Resolve the spans. Since the LeaseHolderCache is empty, all the ranges
	// should be grouped and "assigned" to replica 0.
	replicas, err := resolveSpans(context.TODO(), lr.NewSpanResolverIterator(), spans...)
	if err != nil {
		t.Fatal(err)
	}
	if len(replicas) != 3 {
		t.Fatalf("expected replies for 3 spans, got %d: %+v", len(replicas), replicas)
	}
	si := tc.Servers[0]

	nodeID := si.GetNode().Descriptor.NodeID
	storeID := si.GetFirstStoreID()
	for i := 0; i < 3; i++ {
		if len(replicas[i]) != 1 {
			t.Fatalf("expected 1 range for span %s, got %d (%+v)",
				spans[i].Span, len(replicas[i]), replicas[i])
		}
		rd := replicas[i][0].ReplicaDescriptor
		if rd.NodeID != nodeID || rd.StoreID != storeID {
			t.Fatalf("expected span %s to be on replica (%d, %d) but was on %s",
				spans[i].Span, nodeID, storeID, rd)
		}
	}

	// Now populate the caches on node 4 and query again. This time, we expect to see
	// each span on its own range.
	if err := populateCache(tc.Conns[3], 3 /* expectedNumRows */); err != nil {
		t.Fatal(err)
	}
//......... remaining code omitted .........
Author: EvilMcJerkface, Project: cockroach, Lines: 101, Source: span_resolver_test.go

Example 11: TestAmbiguousCommitDueToLeadershipChange

// TestAmbiguousCommitDueToLeadershipChange verifies that an ambiguous
// commit error is returned from sql.Exec in situations where an
// EndTransaction is part of a batch and the disposition of the batch
// request is unknown after a network failure or timeout. The goal
// here is to prevent spurious transaction retries after the initial
// transaction actually succeeded. In cases where there's an
// auto-generated primary key, this can result in silent
// duplications. In cases where the primary key is specified in
// advance, it can result in violated uniqueness constraints, or
// duplicate key violations. See #6053, #7604, and #10023.
func TestAmbiguousCommitDueToLeadershipChange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	t.Skip("#10341")

	// Create a command filter which prevents EndTransaction from
	// returning a response.
	params := base.TestServerArgs{}
	committed := make(chan struct{})
	wait := make(chan struct{})
	var tableStartKey atomic.Value
	var responseCount int32

	// Prevent the first conditional put on table 51 from returning to
	// the waiting client in order to simulate a lost update or slow network
	// link.
	params.Knobs.Store = &storage.StoreTestingKnobs{
		TestingResponseFilter: func(ba roachpb.BatchRequest, br *roachpb.BatchResponse) *roachpb.Error {
			req, ok := ba.GetArg(roachpb.ConditionalPut)
			tsk := tableStartKey.Load()
			if tsk == nil {
				return nil
			}
			if !ok || !bytes.HasPrefix(req.Header().Key, tsk.([]byte)) {
				return nil
			}
			// If this is the first write to the table, wait to respond to the
			// client in order to simulate a retry.
			if atomic.AddInt32(&responseCount, 1) == 1 {
				close(committed)
				<-wait
			}
			return nil
		},
	}
	testClusterArgs := base.TestClusterArgs{
		ReplicationMode: base.ReplicationAuto,
		ServerArgs:      params,
	}
	const numReplicas = 3
	tc := testcluster.StartTestCluster(t, numReplicas, testClusterArgs)
	defer tc.Stopper().Stop()

	sqlDB := sqlutils.MakeSQLRunner(t, tc.Conns[0])

	sqlDB.Exec(`CREATE DATABASE test`)
	sqlDB.Exec(`CREATE TABLE test.t (k SERIAL PRIMARY KEY, v INT)`)

	tableID := sqlutils.QueryTableID(t, tc.Conns[0], "test", "t")
	tableStartKey.Store(keys.MakeTablePrefix(tableID))

	// Wait for new table to split.
	util.SucceedsSoon(t, func() error {
		startKey := tableStartKey.Load().([]byte)

		desc, err := tc.LookupRange(keys.MakeRowSentinelKey(startKey))
		if err != nil {
			t.Fatal(err)
		}
		if !desc.StartKey.Equal(startKey) {
			return errors.Errorf("expected range start key %s; got %s",
				startKey, desc.StartKey)
		}
		return nil
	})

	// Lookup the lease.
	tableRangeDesc, err := tc.LookupRange(keys.MakeRowSentinelKey(tableStartKey.Load().([]byte)))
	if err != nil {
		t.Fatal(err)
	}
	leaseHolder, err := tc.FindRangeLeaseHolder(
		&tableRangeDesc,
		&testcluster.ReplicationTarget{
			NodeID:  tc.Servers[0].GetNode().Descriptor.NodeID,
			StoreID: tc.Servers[0].GetFirstStoreID(),
		})
	if err != nil {
		t.Fatal(err)
	}

	// In a goroutine, send an insert which will commit but not return
	// from the leader (due to the command filter we installed on node 0).
	sqlErrCh := make(chan error, 1)
	go func() {
		// Use a connection other than through the node which is the current
		// leaseholder to ensure that we use GRPC instead of the local server.
		// If we use a local server, the hanging response we simulate takes
		// up the dist sender thread of execution because local requests are
		// executed synchronously.
		sqlConn := tc.Conns[leaseHolder.NodeID%numReplicas]
//......... remaining code omitted .........
Author: knz, Project: cockroach, Lines: 101, Source: ambiguous_commit_test.go

Example 12: TestGossipFirstRange

func TestGossipFirstRange(t *testing.T) {
	defer leaktest.AfterTest(t)()

	tc := testcluster.StartTestCluster(t, 3,
		base.TestClusterArgs{
			ReplicationMode: base.ReplicationManual,
		})
	defer tc.Stopper().Stop()

	errors := make(chan error)
	descs := make(chan *roachpb.RangeDescriptor)
	unregister := tc.Servers[0].Gossip().RegisterCallback(gossip.KeyFirstRangeDescriptor,
		func(_ string, content roachpb.Value) {
			var desc roachpb.RangeDescriptor
			if err := content.GetProto(&desc); err != nil {
				errors <- err
			} else {
				descs <- &desc
			}
		},
	)
	// Unregister the callback before attempting to stop the stopper to prevent
	// deadlock. This is still flaky in theory since a callback can fire between
	// the last read from the channels and this unregister, but testing has
	// shown this solution to be sufficiently robust for now.
	defer unregister()

	// Wait for the specified descriptor to be gossiped for the first range. We
	// loop because the timing of replica addition and lease transfer can cause
	// extra gossiping of the first range.
	waitForGossip := func(desc *roachpb.RangeDescriptor) {
		for {
			select {
			case err := <-errors:
				t.Fatal(err)
			case gossiped := <-descs:
				if reflect.DeepEqual(desc, gossiped) {
					return
				}
				log.Infof(context.TODO(), "expected\n%+v\nbut found\n%+v", desc, gossiped)
			}
		}
	}

	// Expect an initial callback of the first range descriptor.
	select {
	case err := <-errors:
		t.Fatal(err)
	case <-descs:
	}

	// Add two replicas. The first range descriptor should be gossiped after each
	// addition.
	var desc *roachpb.RangeDescriptor
	firstRangeKey := keys.MinKey
	for i := 1; i <= 2; i++ {
		var err error
		if desc, err = tc.AddReplicas(firstRangeKey, tc.Target(i)); err != nil {
			t.Fatal(err)
		}
		waitForGossip(desc)
	}

	// Transfer the lease to a new node. This should cause the first range to be
	// gossiped again.
	if err := tc.TransferRangeLease(desc, tc.Target(1)); err != nil {
		t.Fatal(err)
	}
	waitForGossip(desc)

	// Remove a non-lease holder replica.
	desc, err := tc.RemoveReplicas(firstRangeKey, tc.Target(0))
	if err != nil {
		t.Fatal(err)
	}
	waitForGossip(desc)

	// TODO(peter): Re-enable or remove when we've resolved the discussion
	// about removing the lease-holder replica. See #7872.

	// // Remove the lease holder replica.
	// leaseHolder, err := tc.FindRangeLeaseHolder(desc, nil)
	// desc, err = tc.RemoveReplicas(firstRangeKey, leaseHolder)
	// if err != nil {
	// 	t.Fatal(err)
	// }
	// select {
	// case err := <-errors:
	// 	t.Fatal(err)
	// case gossiped := <-descs:
	// 	if !reflect.DeepEqual(desc, gossiped) {
	// 		t.Fatalf("expected\n%+v\nbut found\n%+v", desc, gossiped)
	// 	}
	// }
}
Author: knz, Project: cockroach, Lines: 95, Source: gossip_test.go

Example 13: TestLeaseInfoRequest

func TestLeaseInfoRequest(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testcluster.StartTestCluster(t, 3,
		base.TestClusterArgs{
			ReplicationMode: base.ReplicationManual,
		})
	defer tc.Stopper().Stop()

	kvDB0 := tc.Servers[0].DB()
	kvDB1 := tc.Servers[1].DB()

	key := []byte("a")
	rangeDesc := new(roachpb.RangeDescriptor)
	var err error
	*rangeDesc, err = tc.LookupRange(key)
	if err != nil {
		t.Fatal(err)
	}

	rangeDesc, err = tc.AddReplicas(
		rangeDesc.StartKey.AsRawKey(), tc.Target(1), tc.Target(2),
	)
	if err != nil {
		t.Fatal(err)
	}
	if len(rangeDesc.Replicas) != 3 {
		t.Fatalf("expected 3 replicas, got %+v", rangeDesc.Replicas)
	}
	replicas := make([]roachpb.ReplicaDescriptor, 3)
	for i := 0; i < 3; i++ {
		var ok bool
		replicas[i], ok = rangeDesc.GetReplicaDescriptor(tc.Servers[i].GetFirstStoreID())
		if !ok {
			t.Fatalf("expected to find replica in server %d", i)
		}
	}

	// Lease should start on Server 0, since nobody told it to move.
	leaseHolderReplica := LeaseInfo(t, kvDB0, *rangeDesc, roachpb.INCONSISTENT).Lease.Replica
	if leaseHolderReplica != replicas[0] {
		t.Fatalf("lease holder should be replica %+v, but is: %+v", replicas[0], leaseHolderReplica)
	}

	// Transfer the lease to Server 1 and check that LeaseInfoRequest gets the
	// right answer.
	err = tc.TransferRangeLease(rangeDesc, tc.Target(1))
	if err != nil {
		t.Fatal(err)
	}
	// An inconsistent LeaseInfoRequest on the old lease holder should give us the
	// right answer immediately, since the old holder has definitely applied the
	// transfer before TransferRangeLease returned.
	leaseHolderReplica = LeaseInfo(t, kvDB0, *rangeDesc, roachpb.INCONSISTENT).Lease.Replica
	if leaseHolderReplica != replicas[1] {
		t.Fatalf("lease holder should be replica %+v, but is: %+v",
			replicas[1], leaseHolderReplica)
	}

	// A read on the new lease holder does not necessarily succeed immediately,
	// since it might take a while for it to apply the transfer.
	util.SucceedsSoon(t, func() error {
		// We can't reliably do a CONSISTENT read here, even though we're reading
		// from the supposed lease holder, because this node might initially be
		// unaware of the new lease and so the request might bounce around for a
		// while (see #8816).
		leaseHolderReplica = LeaseInfo(t, kvDB1, *rangeDesc, roachpb.INCONSISTENT).Lease.Replica
		if leaseHolderReplica != replicas[1] {
			return errors.Errorf("lease holder should be replica %+v, but is: %+v",
				replicas[1], leaseHolderReplica)
		}
		return nil
	})

	// Transfer the lease to Server 2 and check that LeaseInfoRequest gets the
	// right answer.
	err = tc.TransferRangeLease(rangeDesc, tc.Target(2))
	if err != nil {
		t.Fatal(err)
	}
	leaseHolderReplica = LeaseInfo(t, kvDB1, *rangeDesc, roachpb.INCONSISTENT).Lease.Replica
	if leaseHolderReplica != replicas[2] {
		t.Fatalf("lease holder should be replica %+v, but is: %+v", replicas[2], leaseHolderReplica)
	}

	// TODO(andrei): test the side-effect of LeaseInfoRequest when there's no
	// active lease - the node getting the request is supposed to acquire the
	// lease. This requires a way to expire leases; the TestCluster probably needs
	// to use a mock clock.
}
Author: bdarnell, Project: cockroach, Lines: 89, Source: client_replica_test.go

Example 14: TestAdminAPITableStats

func TestAdminAPITableStats(t *testing.T) {
	defer leaktest.AfterTest(t)()
	t.Skip("#8890")

	const nodeCount = 3
	tc := testcluster.StartTestCluster(t, nodeCount, base.TestClusterArgs{
		ReplicationMode: base.ReplicationAuto,
		ServerArgs: base.TestServerArgs{
			ScanInterval:    time.Millisecond,
			ScanMaxIdleTime: time.Millisecond,
		},
	})
	defer tc.Stopper().Stop()
	server0 := tc.Server(0)

	// Create clients (SQL, HTTP) connected to server 0.
	db := tc.ServerConn(0)

	client, err := server0.GetHTTPClient()
	if err != nil {
		t.Fatal(err)
	}

	client.Timeout = base.NetworkTimeout * 3

	// Make a single table and insert some data. The database and table have
	// names which require escaping, in order to verify that database and
	// table names are being handled correctly.
	if _, err := db.Exec(`CREATE DATABASE "test test"`); err != nil {
		t.Fatal(err)
	}
	if _, err := db.Exec(`
		CREATE TABLE "test test"."foo foo" (
			id INT PRIMARY KEY,
			val STRING
		)`,
	); err != nil {
		t.Fatal(err)
	}
	for i := 0; i < 10; i++ {
		if _, err := db.Exec(`
			INSERT INTO "test test"."foo foo" VALUES(
				$1, $2
			)`, i, "test",
		); err != nil {
			t.Fatal(err)
		}
	}

	url := server0.AdminURL() + "/_admin/v1/databases/test test/tables/foo foo/stats"
	var tsResponse serverpb.TableStatsResponse

	// The new SQL table may not yet have split into its own range. Wait for
	// this to occur, and for full replication.
	util.SucceedsSoon(t, func() error {
		if err := httputil.GetJSON(client, url, &tsResponse); err != nil {
			return err
		}
		if tsResponse.RangeCount != 1 {
			return errors.Errorf("Table range not yet separated.")
		}
		if tsResponse.NodeCount != nodeCount {
			return errors.Errorf("Table range not yet replicated to %d nodes.", 3)
		}
		if a, e := tsResponse.ReplicaCount, int64(nodeCount); a != e {
			return errors.Errorf("expected %d replicas, found %d", e, a)
		}
		return nil
	})

	// These two conditions *must* be true, given that the above
	// SucceedsSoon has succeeded.
	if a, e := tsResponse.Stats.KeyCount, int64(20); a < e {
		t.Fatalf("expected at least 20 total keys, found %d", a)
	}
	if len(tsResponse.MissingNodes) > 0 {
		t.Fatalf("expected no missing nodes, found %v", tsResponse.MissingNodes)
	}

	// Kill a node, ensure it shows up in MissingNodes and that ReplicaCount is
	// lower.
	tc.StopServer(1)

	if err := httputil.GetJSON(client, url, &tsResponse); err != nil {
		t.Fatal(err)
	}
	if a, e := tsResponse.NodeCount, int64(nodeCount); a != e {
		t.Errorf("expected %d nodes, found %d", e, a)
	}
	if a, e := tsResponse.RangeCount, int64(1); a != e {
		t.Errorf("expected %d ranges, found %d", e, a)
	}
	if a, e := tsResponse.ReplicaCount, int64((nodeCount/2)+1); a != e {
		t.Errorf("expected %d replicas, found %d", e, a)
	}
	if a, e := tsResponse.Stats.KeyCount, int64(10); a < e {
		t.Errorf("expected at least 10 total keys, found %d", a)
	}
	if len(tsResponse.MissingNodes) != 1 {
		t.Errorf("expected one missing node, found %v", tsResponse.MissingNodes)
//......... remaining code omitted .........
Author: knz, Project: cockroach, Lines: 101, Source: admin_cluster_test.go


Note: The github.com/cockroachdb/cockroach/pkg/testutils/testcluster.StartTestCluster function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and the copyright of the source code belongs to the original authors. For redistribution and use, please refer to the corresponding project's license; do not reproduce without permission.