This article collects typical usage examples of the Golang function MakeSQLRunner from github.com/cockroachdb/cockroach/pkg/testutils/sqlutils. If you are unsure what MakeSQLRunner does or how to use it, the hand-picked code samples below may help.
Ten code examples of MakeSQLRunner are shown below, sorted by popularity by default.
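Before the individual examples, here is a minimal sketch of the pattern they all share: start a test server (or cluster), wrap a *gosql.DB connection with sqlutils.MakeSQLRunner, and issue SQL through the runner's Exec/QueryRow helpers, which fail the test on error instead of returning one. The database and table names in this sketch are illustrative, not taken from the examples below.

package example

import (
	"testing"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
)

func TestMakeSQLRunnerBasics(t *testing.T) {
	// Start a single-node test server and wrap its SQL connection in a runner.
	s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	r := sqlutils.MakeSQLRunner(t, db)

	// Exec fails the test on any error, so no explicit error handling is needed.
	r.Exec(`CREATE DATABASE d`)
	r.Exec(`CREATE TABLE d.kv (k INT PRIMARY KEY, v INT)`)
	r.Exec(`INSERT INTO d.kv VALUES (1, 10)`)

	// QueryRow returns a row whose result can be scanned directly.
	var v int
	r.QueryRow(`SELECT v FROM d.kv WHERE k = 1`).Scan(&v)
	if v != 10 {
		t.Fatalf("expected v = 10, got %d", v)
	}
}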
Example 1: setup
func (t *parallelTest) setup(spec *parTestSpec) {
if spec.ClusterSize == 0 {
spec.ClusterSize = 1
}
if testing.Verbose() || log.V(1) {
log.Infof(t.ctx, "Cluster Size: %d", spec.ClusterSize)
}
args := base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
Knobs: base.TestingKnobs{
SQLExecutor: &sql.ExecutorTestingKnobs{
WaitForGossipUpdate: true,
CheckStmtStringChange: true,
},
},
},
}
t.cluster = serverutils.StartTestCluster(t, spec.ClusterSize, args)
t.clients = make([][]*gosql.DB, spec.ClusterSize)
for i := range t.clients {
t.clients[i] = append(t.clients[i], t.cluster.ServerConn(i))
}
r0 := sqlutils.MakeSQLRunner(t, t.clients[0][0])
if spec.RangeSplitSize != 0 {
if testing.Verbose() || log.V(1) {
log.Infof(t.ctx, "Setting range split size: %d", spec.RangeSplitSize)
}
zoneCfg := config.DefaultZoneConfig()
zoneCfg.RangeMaxBytes = int64(spec.RangeSplitSize)
zoneCfg.RangeMinBytes = zoneCfg.RangeMaxBytes / 2
buf, err := protoutil.Marshal(&zoneCfg)
if err != nil {
t.Fatal(err)
}
objID := keys.RootNamespaceID
r0.Exec(`UPDATE system.zones SET config = $2 WHERE id = $1`, objID, buf)
}
if testing.Verbose() || log.V(1) {
log.Infof(t.ctx, "Creating database")
}
r0.Exec("CREATE DATABASE test")
for i := range t.clients {
sqlutils.MakeSQLRunner(t, t.clients[i][0]).Exec("SET DATABASE = test")
}
if testing.Verbose() || log.V(1) {
log.Infof(t.ctx, "Test setup done")
}
}
Example 2: TestSplitAtTableBoundary
func TestSplitAtTableBoundary(t *testing.T) {
defer leaktest.AfterTest(t)()
testClusterArgs := base.TestClusterArgs{
ReplicationMode: base.ReplicationAuto,
}
tc := testcluster.StartTestCluster(t, 3, testClusterArgs)
defer tc.Stopper().Stop()
runner := sqlutils.MakeSQLRunner(t, tc.Conns[0])
runner.Exec(`CREATE DATABASE test`)
runner.Exec(`CREATE TABLE test.t (k SERIAL PRIMARY KEY, v INT)`)
const tableIDQuery = `
SELECT tables.id FROM system.namespace tables
JOIN system.namespace dbs ON dbs.id = tables.parentid
WHERE dbs.name = $1 AND tables.name = $2
`
var tableID uint32
runner.QueryRow(tableIDQuery, "test", "t").Scan(&tableID)
tableStartKey := keys.MakeTablePrefix(tableID)
// Wait for new table to split.
testutils.SucceedsSoon(t, func() error {
desc, err := tc.LookupRange(keys.MakeRowSentinelKey(tableStartKey))
if err != nil {
t.Fatal(err)
}
if !desc.StartKey.Equal(tableStartKey) {
log.Infof(context.TODO(), "waiting on split results")
return errors.Errorf("expected range start key %s; got %s", tableStartKey, desc.StartKey)
}
return nil
})
}
Example 3: makeMutationTest
func makeMutationTest(
t *testing.T, kvDB *client.DB, db *gosql.DB, tableDesc *sqlbase.TableDescriptor,
) mutationTest {
return mutationTest{
SQLRunner: sqlutils.MakeSQLRunner(t, db),
kvDB: kvDB,
tableDesc: tableDesc,
}
}
Example 4: backupRestoreTestSetup
func backupRestoreTestSetup(
t testing.TB, numAccounts int,
) (
ctx context.Context,
tempDir string,
tc *testcluster.TestCluster,
kvDB *client.DB,
sqlDB *sqlutils.SQLRunner,
cleanup func(),
) {
ctx = context.Background()
dir, dirCleanupFn := testutils.TempDir(t, 1)
// Use ReplicationManual so we can force full replication, which is needed
// to later move the leases around.
tc = testcluster.StartTestCluster(t, backupRestoreClusterSize, base.TestClusterArgs{
ReplicationMode: base.ReplicationManual,
})
sqlDB = sqlutils.MakeSQLRunner(t, tc.Conns[0])
kvDB = tc.Server(0).KVClient().(*client.DB)
sqlDB.Exec(bankCreateDatabase)
sqlDB.Exec(bankCreateTable)
for _, insert := range bankDataInsertStmts(numAccounts) {
sqlDB.Exec(insert)
}
for _, split := range bankSplitStmts(numAccounts, backupRestoreDefaultRanges) {
sqlDB.Exec(split)
}
targets := make([]testcluster.ReplicationTarget, backupRestoreClusterSize-1)
for i := 1; i < backupRestoreClusterSize; i++ {
targets[i-1] = tc.Target(i)
}
txn := client.NewTxn(ctx, *kvDB)
rangeDescs, err := sql.AllRangeDescriptors(txn)
if err != nil {
t.Fatal(err)
}
for _, r := range rangeDescs {
if _, err := tc.AddReplicas(r.StartKey.AsRawKey(), targets...); err != nil {
t.Fatal(err)
}
}
cleanupFn := func() {
tc.Stopper().Stop()
dirCleanupFn()
}
return ctx, dir, tc, kvDB, sqlDB, cleanupFn
}
Example 5: TestBackupRestoreOnce
func TestBackupRestoreOnce(t *testing.T) {
defer leaktest.AfterTest(t)()
// TODO(dan): Actually invalidate the descriptor cache and delete this line.
defer sql.TestDisableTableLeases()()
const numAccounts = 1000
ctx, dir, tc, kvDB, _, cleanupFn := backupRestoreTestSetup(t, numAccounts)
defer cleanupFn()
{
desc, err := sql.Backup(ctx, *kvDB, dir, tc.Server(0).Clock().Now())
if err != nil {
t.Fatal(err)
}
approxDataSize := int64(backupRestoreRowPayloadSize) * numAccounts
if max := approxDataSize * 2; desc.DataSize < approxDataSize || desc.DataSize > max {
t.Errorf("expected data size in [%d,%d] but was %d", approxDataSize, max, desc.DataSize)
}
}
// Start a new cluster to restore into.
{
tcRestore := testcluster.StartTestCluster(t, backupRestoreClusterSize, base.TestClusterArgs{})
defer tcRestore.Stopper().Stop()
sqlDBRestore := sqlutils.MakeSQLRunner(t, tcRestore.Conns[0])
kvDBRestore := tcRestore.Server(0).KVClient().(*client.DB)
// Restore assumes the database exists.
sqlDBRestore.Exec(bankCreateDatabase)
table := parser.TableName{DatabaseName: "bench", TableName: "bank"}
if _, err := sql.Restore(ctx, *kvDBRestore, dir, table); err != nil {
t.Fatal(err)
}
var rowCount int
sqlDBRestore.QueryRow(`SELECT COUNT(*) FROM bench.bank`).Scan(&rowCount)
if rowCount != numAccounts {
t.Fatalf("expected %d rows but found %d", numAccounts, rowCount)
}
}
}
Example 6: getClient
func (t *parallelTest) getClient(nodeIdx, clientIdx int) *gosql.DB {
for len(t.clients[nodeIdx]) <= clientIdx {
// Add a client.
pgURL, cleanupFunc := sqlutils.PGUrl(t.T,
t.cluster.Server(nodeIdx).ServingAddr(),
"TestParallel",
url.User(security.RootUser))
db, err := gosql.Open("postgres", pgURL.String())
if err != nil {
t.Fatal(err)
}
sqlutils.MakeSQLRunner(t, db).Exec("SET DATABASE = test")
t.cluster.Stopper().AddCloser(
stop.CloserFn(func() {
_ = db.Close()
cleanupFunc()
}))
t.clients[nodeIdx] = append(t.clients[nodeIdx], db)
}
return t.clients[nodeIdx][clientIdx]
}
Example 7: TestSplitAt
func TestSplitAt(t *testing.T) {
defer leaktest.AfterTest(t)()
params, _ := createTestServerParams()
s, db, _ := serverutils.StartServer(t, params)
defer s.Stopper().Stop()
r := sqlutils.MakeSQLRunner(t, db)
r.Exec("CREATE DATABASE d")
r.Exec(`CREATE TABLE d.t (
i INT,
s STRING,
PRIMARY KEY (i, s),
INDEX s_idx (s)
)`)
r.Exec(`CREATE TABLE d.i (k INT PRIMARY KEY)`)
tests := []struct {
in string
error string
args []interface{}
}{
{
in: "ALTER TABLE d.t SPLIT AT (2, 'b')",
},
{
in: "ALTER TABLE d.t SPLIT AT (2, 'b')",
error: "range is already split",
},
{
in: "ALTER TABLE d.t SPLIT AT ('c', 3)",
error: "argument of SPLIT AT must be type int, not type string",
},
{
in: "ALTER TABLE d.t SPLIT AT (4)",
error: "expected 2 expressions, got 1",
},
{
in: "ALTER TABLE d.t SPLIT AT (5, 'e')",
},
{
in: "ALTER TABLE d.t SPLIT AT (i, s)",
error: `name "i" is not defined`,
},
{
in: "ALTER INDEX [email protected]_idx SPLIT AT ('f')",
},
{
in: "ALTER INDEX [email protected]_present SPLIT AT ('g')",
error: `index "not_present" does not exist`,
},
{
in: "ALTER TABLE d.i SPLIT AT (avg(1))",
error: "unknown signature: avg(int) (desired <int>)",
},
{
in: "ALTER TABLE d.i SPLIT AT (avg(k))",
error: `avg(): name "k" is not defined`,
},
{
in: "ALTER TABLE d.i SPLIT AT ($1)",
args: []interface{}{8},
},
{
in: "ALTER TABLE d.i SPLIT AT ($1)",
error: "no value provided for placeholder: $1",
},
{
in: "ALTER TABLE d.i SPLIT AT ($1)",
args: []interface{}{"blah"},
error: "error in argument for $1: strconv.ParseInt",
},
{
in: "ALTER TABLE d.i SPLIT AT ($1::string)",
args: []interface{}{"1"},
error: "argument of SPLIT AT must be type int, not type string",
},
{
in: "ALTER TABLE d.i SPLIT AT ((SELECT 1))",
},
{
in: "ALTER TABLE d.i SPLIT AT ((SELECT 1, 2))",
error: "subquery must return only one column, found 2",
},
}
for _, tt := range tests {
var key roachpb.Key
var pretty string
err := db.QueryRow(tt.in, tt.args...).Scan(&key, &pretty)
if err != nil && tt.error == "" {
t.Fatalf("%s: unexpected error: %s", tt.in, err)
} else if tt.error != "" && err == nil {
t.Fatalf("%s: expected error: %s", tt.in, tt.error)
} else if err != nil && tt.error != "" {
if !strings.Contains(err.Error(), tt.error) {
t.Fatalf("%s: unexpected error: %s", tt.in, err)
}
} else {
//......... remainder of this function omitted .........
Example 8: TestAmbiguousCommitDueToLeadershipChange
// TestAmbiguousCommitDueToLeadershipChange verifies that an ambiguous
// commit error is returned from sql.Exec in situations where an
// EndTransaction is part of a batch and the disposition of the batch
// request is unknown after a network failure or timeout. The goal
// here is to prevent spurious transaction retries after the initial
// transaction actually succeeded. In cases where there's an
// auto-generated primary key, this can result in silent
// duplications. In cases where the primary key is specified in
// advance, it can result in violated uniqueness constraints, or
// duplicate key violations. See #6053, #7604, and #10023.
func TestAmbiguousCommitDueToLeadershipChange(t *testing.T) {
defer leaktest.AfterTest(t)()
t.Skip("#10341")
// Create a command filter which prevents EndTransaction from
// returning a response.
params := base.TestServerArgs{}
committed := make(chan struct{})
wait := make(chan struct{})
var tableStartKey atomic.Value
var responseCount int32
// Prevent the first conditional put on table 51 from returning to the
// waiting client, in order to simulate a lost update or a slow network
// link.
params.Knobs.Store = &storage.StoreTestingKnobs{
TestingResponseFilter: func(ba roachpb.BatchRequest, br *roachpb.BatchResponse) *roachpb.Error {
req, ok := ba.GetArg(roachpb.ConditionalPut)
tsk := tableStartKey.Load()
if tsk == nil {
return nil
}
if !ok || !bytes.HasPrefix(req.Header().Key, tsk.([]byte)) {
return nil
}
// If this is the first write to the table, wait to respond to the
// client in order to simulate a retry.
if atomic.AddInt32(&responseCount, 1) == 1 {
close(committed)
<-wait
}
return nil
},
}
testClusterArgs := base.TestClusterArgs{
ReplicationMode: base.ReplicationAuto,
ServerArgs: params,
}
const numReplicas = 3
tc := testcluster.StartTestCluster(t, numReplicas, testClusterArgs)
defer tc.Stopper().Stop()
sqlDB := sqlutils.MakeSQLRunner(t, tc.Conns[0])
sqlDB.Exec(`CREATE DATABASE test`)
sqlDB.Exec(`CREATE TABLE test.t (k SERIAL PRIMARY KEY, v INT)`)
tableID := sqlutils.QueryTableID(t, tc.Conns[0], "test", "t")
tableStartKey.Store(keys.MakeTablePrefix(tableID))
// Wait for new table to split.
util.SucceedsSoon(t, func() error {
startKey := tableStartKey.Load().([]byte)
desc, err := tc.LookupRange(keys.MakeRowSentinelKey(startKey))
if err != nil {
t.Fatal(err)
}
if !desc.StartKey.Equal(startKey) {
return errors.Errorf("expected range start key %s; got %s",
startKey, desc.StartKey)
}
return nil
})
// Lookup the lease.
tableRangeDesc, err := tc.LookupRange(keys.MakeRowSentinelKey(tableStartKey.Load().([]byte)))
if err != nil {
t.Fatal(err)
}
leaseHolder, err := tc.FindRangeLeaseHolder(
&tableRangeDesc,
&testcluster.ReplicationTarget{
NodeID: tc.Servers[0].GetNode().Descriptor.NodeID,
StoreID: tc.Servers[0].GetFirstStoreID(),
})
if err != nil {
t.Fatal(err)
}
// In a goroutine, send an insert which will commit but not return
// from the leader (due to the command filter we installed on node 0).
sqlErrCh := make(chan error, 1)
go func() {
// Use a connection other than through the node which is the current
// leaseholder to ensure that we use GRPC instead of the local server.
// If we use a local server, the hanging response we simulate takes
// up the dist sender thread of execution because local requests are
// executed synchronously.
sqlConn := tc.Conns[leaseHolder.NodeID%numReplicas]
//......... remainder of this function omitted .........
Example 9: TestManualReplication
func TestManualReplication(t *testing.T) {
defer leaktest.AfterTest(t)()
tc := StartTestCluster(t, 3,
base.TestClusterArgs{
ReplicationMode: base.ReplicationManual,
ServerArgs: base.TestServerArgs{
UseDatabase: "t",
},
})
defer tc.Stopper().Stop()
s0 := sqlutils.MakeSQLRunner(t, tc.Conns[0])
s1 := sqlutils.MakeSQLRunner(t, tc.Conns[1])
s2 := sqlutils.MakeSQLRunner(t, tc.Conns[2])
s0.Exec(`CREATE DATABASE t`)
s0.Exec(`CREATE TABLE test (k INT PRIMARY KEY, v INT)`)
s0.Exec(`INSERT INTO test VALUES (5, 1), (4, 2), (1, 2)`)
if r := s1.Query(`SELECT * FROM test WHERE k = 5`); !r.Next() {
t.Fatal("no rows")
}
s2.ExecRowsAffected(3, `DELETE FROM test`)
// Split the table to a new range.
kvDB := tc.Servers[0].DB()
tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
tableStartKey := keys.MakeRowSentinelKey(keys.MakeTablePrefix(uint32(tableDesc.ID)))
leftRangeDesc, tableRangeDesc, err := tc.SplitRange(tableStartKey)
if err != nil {
t.Fatal(err)
}
log.Infof(context.Background(), "After split got ranges: %+v and %+v.", leftRangeDesc, tableRangeDesc)
if len(tableRangeDesc.Replicas) == 0 {
t.Fatalf(
"expected replica on node 1, got no replicas: %+v", tableRangeDesc.Replicas)
}
if tableRangeDesc.Replicas[0].NodeID != 1 {
t.Fatalf(
"expected replica on node 1, got replicas: %+v", tableRangeDesc.Replicas)
}
// Replicate the table's range to all the nodes.
tableRangeDesc, err = tc.AddReplicas(
tableRangeDesc.StartKey.AsRawKey(), tc.Target(1), tc.Target(2),
)
if err != nil {
t.Fatal(err)
}
if len(tableRangeDesc.Replicas) != 3 {
t.Fatalf("expected 3 replicas, got %+v", tableRangeDesc.Replicas)
}
for i := 0; i < 3; i++ {
if _, ok := tableRangeDesc.GetReplicaDescriptor(
tc.Servers[i].GetFirstStoreID()); !ok {
t.Fatalf("expected replica on store %d, got %+v",
tc.Servers[i].GetFirstStoreID(), tableRangeDesc.Replicas)
}
}
// Transfer the lease to node 1.
leaseHolder, err := tc.FindRangeLeaseHolder(
tableRangeDesc,
&ReplicationTarget{
NodeID: tc.Servers[0].GetNode().Descriptor.NodeID,
StoreID: tc.Servers[0].GetFirstStoreID(),
})
if err != nil {
t.Fatal(err)
}
if leaseHolder.StoreID != tc.Servers[0].GetFirstStoreID() {
t.Fatalf("expected initial lease on server idx 0, but is on node: %+v",
leaseHolder)
}
err = tc.TransferRangeLease(tableRangeDesc, tc.Target(1))
if err != nil {
t.Fatal(err)
}
// Check that the lease holder has changed. We'll use the old lease holder as
// the hint, since it's guaranteed that the old lease holder has applied the
// new lease.
leaseHolder, err = tc.FindRangeLeaseHolder(
tableRangeDesc,
&ReplicationTarget{
NodeID: tc.Servers[0].GetNode().Descriptor.NodeID,
StoreID: tc.Servers[0].GetFirstStoreID(),
})
if err != nil {
t.Fatal(err)
}
if leaseHolder.StoreID != tc.Servers[1].GetFirstStoreID() {
t.Fatalf("expected lease on server idx 1 (node: %d store: %d), but is on node: %+v",
tc.Servers[1].GetNode().Descriptor.NodeID,
tc.Servers[1].GetFirstStoreID(),
leaseHolder)
//......... remainder of this function omitted .........
Example 10: TestServer
func TestServer(t *testing.T) {
defer leaktest.AfterTest(t)()
s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
defer s.Stopper().Stop()
conn, err := s.RPCContext().GRPCDial(s.ServingAddr())
if err != nil {
t.Fatal(err)
}
r := sqlutils.MakeSQLRunner(t, sqlDB)
r.Exec(`CREATE DATABASE test`)
r.Exec(`CREATE TABLE test.t (a INT PRIMARY KEY, b INT)`)
r.Exec(`INSERT INTO test.t VALUES (1, 10), (2, 20), (3, 30)`)
td := sqlbase.GetTableDescriptor(kvDB, "test", "t")
ts := TableReaderSpec{
Table: *td,
IndexIdx: 0,
Reverse: false,
Spans: nil,
Filter: Expression{Expr: "$0 != 2"}, // a != 2
OutputColumns: []uint32{0, 1}, // a, b
}
txn := client.NewTxn(context.Background(), *kvDB)
req := &SetupFlowRequest{Txn: txn.Proto}
req.Flow = FlowSpec{
Processors: []ProcessorSpec{{
Core: ProcessorCoreUnion{TableReader: &ts},
Output: []OutputRouterSpec{{
Type: OutputRouterSpec_MIRROR,
Streams: []StreamEndpointSpec{{Mailbox: &MailboxSpec{SimpleResponse: true}}},
}},
}},
}
distSQLClient := NewDistSQLClient(conn)
stream, err := distSQLClient.RunSimpleFlow(context.Background(), req)
if err != nil {
t.Fatal(err)
}
var decoder StreamDecoder
var rows sqlbase.EncDatumRows
for {
msg, err := stream.Recv()
if err != nil {
if err == io.EOF {
break
}
t.Fatal(err)
}
err = decoder.AddMessage(msg)
if err != nil {
t.Fatal(err)
}
rows = testGetDecodedRows(t, &decoder, rows)
}
if done, trailerErr := decoder.IsDone(); !done {
t.Fatal("stream not done")
} else if trailerErr != nil {
t.Fatal("error in the stream trailer:", trailerErr)
}
str := rows.String()
expected := "[[1 10] [3 30]]"
if str != expected {
t.Errorf("invalid results: %s, expected %s'", str, expected)
}
}