This article collects typical usage examples of the Go function MakeTablePrefix from github.com/cockroachdb/cockroach/pkg/keys. If you are wondering what MakeTablePrefix does, how to call it, or what it looks like in real code, the curated examples below may help.
The sections that follow present 15 code examples of the MakeTablePrefix function, sorted by popularity by default.
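All of the examples share one basic pattern: keys.MakeTablePrefix(tableID) returns the raw []byte prefix of a SQL table's key space, which callers either extend with encoding.EncodeUvarintAscending to address rows, or wrap in roachpb.Key and pair with PrefixEnd() to form a scan span covering the whole table. Below is a minimal, self-contained sketch of that span-building pattern; it assumes the pkg/keys and pkg/roachpb packages from the repository named above, and the helper name buildTableSpan and the table ID 51 are purely illustrative.

package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

// buildTableSpan wraps the []byte prefix returned by keys.MakeTablePrefix in a
// roachpb.Key so that PrefixEnd can compute the exclusive end of the table's span.
func buildTableSpan(tableID uint32) roachpb.Span {
	startKey := roachpb.Key(keys.MakeTablePrefix(tableID))
	return roachpb.Span{Key: startKey, EndKey: startKey.PrefixEnd()}
}

func main() {
	// The table ID is arbitrary here; real callers usually read it from a
	// TableDescriptor, as most of the examples below do.
	span := buildTableSpan(51)
	fmt.Printf("table span: [%s, %s)\n", span.Key, span.EndKey)
}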
Example 1: TestObjectIDForKey
func TestObjectIDForKey(t *testing.T) {
defer leaktest.AfterTest(t)()
testCases := []struct {
key roachpb.RKey
success bool
id uint32
}{
// Before the structured span.
{roachpb.RKeyMin, false, 0},
// Boundaries of structured span.
{roachpb.RKeyMax, false, 0},
// Valid, even if there are things after the ID.
{testutils.MakeKey(keys.MakeTablePrefix(42), roachpb.RKey("\xff")), true, 42},
{keys.MakeTablePrefix(0), true, 0},
{keys.MakeTablePrefix(999), true, 999},
}
for tcNum, tc := range testCases {
id, success := config.ObjectIDForKey(tc.key)
if success != tc.success {
t.Errorf("#%d: expected success=%t", tcNum, tc.success)
continue
}
if id != tc.id {
t.Errorf("#%d: expected id=%d, got %d", tcNum, tc.id, id)
}
}
}
Example 2: sqlKV
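// sqlKV builds a KeyValue whose key consists of the table prefix followed by
// an index ID, a descriptor ID, and an arbitrary trailing column ID.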
func sqlKV(tableID uint32, indexID, descriptorID uint64) roachpb.KeyValue {
k := keys.MakeTablePrefix(tableID)
k = encoding.EncodeUvarintAscending(k, indexID)
k = encoding.EncodeUvarintAscending(k, descriptorID)
k = encoding.EncodeUvarintAscending(k, 12345) // Column ID, but could be anything.
return kv(k, nil)
}
Example 3: TestDropIndexInterleaved
func TestDropIndexInterleaved(t *testing.T) {
defer leaktest.AfterTest(t)()
const chunkSize = 200
params, _ := createTestServerParams()
params.Knobs = base.TestingKnobs{
SQLSchemaChanger: &sql.SchemaChangerTestingKnobs{
BackfillChunkSize: chunkSize,
},
}
s, sqlDB, kvDB := serverutils.StartServer(t, params)
defer s.Stopper().Stop()
numRows := 2*chunkSize + 1
createKVInterleavedTable(t, sqlDB, numRows)
tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv")
tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))
checkKeyCount(t, kvDB, tablePrefix, 3*numRows)
if _, err := sqlDB.Exec(`DROP INDEX t.intlv@intlv_idx`); err != nil {
t.Fatal(err)
}
checkKeyCount(t, kvDB, tablePrefix, 2*numRows)
// Ensure that index is not active.
tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "intlv")
if _, _, err := tableDesc.FindIndexByName("intlv_idx"); err == nil {
t.Fatalf("table descriptor still contains index after index is dropped")
}
}
Example 4: monkey
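// monkey sleeps for a random fraction of d on each iteration, then picks a
// random account key in the table and either splits the range at that key or
// transfers the range lease.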
func (z *zeroSum) monkey(tableID uint32, d time.Duration) {
r := newRand()
zipf := z.accountDistribution(r)
for {
time.Sleep(time.Duration(rand.Float64() * float64(d)))
key := keys.MakeTablePrefix(tableID)
key = encoding.EncodeVarintAscending(key, int64(zipf.Uint64()))
key = keys.MakeRowSentinelKey(key)
switch r.Intn(2) {
case 0:
if err := z.Split(z.RandNode(r.Intn), key); err != nil {
if strings.Contains(err.Error(), "range is already split at key") ||
strings.Contains(err.Error(), storage.ErrMsgConflictUpdatingRangeDesc) {
continue
}
z.maybeLogError(err)
} else {
atomic.AddUint64(&z.stats.splits, 1)
}
case 1:
if transferred, err := z.TransferLease(z.RandNode(r.Intn), r, key); err != nil {
z.maybeLogError(err)
} else if transferred {
atomic.AddUint64(&z.stats.transfers, 1)
}
}
}
}
Example 5: TestDropTableInterleaved
// TestDropTableInterleaved tests dropping a table that is interleaved within
// another table.
func TestDropTableInterleaved(t *testing.T) {
defer leaktest.AfterTest(t)()
params, _ := createTestServerParams()
s, sqlDB, kvDB := serverutils.StartServer(t, params)
defer s.Stopper().Stop()
numRows := 2*sql.TableTruncateChunkSize + 1
createKVInterleavedTable(t, sqlDB, numRows)
tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv")
tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))
checkKeyCount(t, kvDB, tablePrefix, 3*numRows)
if _, err := sqlDB.Exec(`DROP TABLE t.intlv`); err != nil {
t.Fatal(err)
}
checkKeyCount(t, kvDB, tablePrefix, numRows)
// Test that deleted table cannot be used. This prevents regressions where
// name -> descriptor ID caches might make this statement erroneously work.
if _, err := sqlDB.Exec(`SELECT * FROM t.intlv`); !testutils.IsError(
err, `table "t.intlv" does not exist`,
) {
t.Fatalf("different error than expected: %v", err)
}
}
Example 6: TestSplitAtTableBoundary
func TestSplitAtTableBoundary(t *testing.T) {
defer leaktest.AfterTest(t)()
testClusterArgs := base.TestClusterArgs{
ReplicationMode: base.ReplicationAuto,
}
tc := testcluster.StartTestCluster(t, 3, testClusterArgs)
defer tc.Stopper().Stop()
runner := sqlutils.MakeSQLRunner(t, tc.Conns[0])
runner.Exec(`CREATE DATABASE test`)
runner.Exec(`CREATE TABLE test.t (k SERIAL PRIMARY KEY, v INT)`)
const tableIDQuery = `
SELECT tables.id FROM system.namespace tables
JOIN system.namespace dbs ON dbs.id = tables.parentid
WHERE dbs.name = $1 AND tables.name = $2
`
var tableID uint32
runner.QueryRow(tableIDQuery, "test", "t").Scan(&tableID)
tableStartKey := keys.MakeTablePrefix(tableID)
// Wait for new table to split.
testutils.SucceedsSoon(t, func() error {
desc, err := tc.LookupRange(keys.MakeRowSentinelKey(tableStartKey))
if err != nil {
t.Fatal(err)
}
if !desc.StartKey.Equal(tableStartKey) {
log.Infof(context.TODO(), "waiting on split results")
return errors.Errorf("expected range start key %s; got %s", tableStartKey, desc.StartKey)
}
return nil
})
}
Example 7: restoreTableDesc
func restoreTableDesc(
ctx context.Context,
txn *client.Txn,
database sqlbase.DatabaseDescriptor,
table sqlbase.TableDescriptor,
) error {
// Run getDescriptorID again to make sure the database hasn't been dropped
// while we were importing.
var err error
if table.ParentID, err = getDescriptorID(txn, tableKey{name: database.Name}); err != nil {
return err
}
tableIDKey := tableKey{parentID: table.ParentID, name: table.Name}.Key()
tableDescKey := sqlbase.MakeDescMetadataKey(table.ID)
// Check for an existing table.
var existingDesc sqlbase.Descriptor
existingIDKV, err := txn.Get(tableIDKey)
if err != nil {
return err
}
if existingIDKV.Value != nil {
existingID, err := existingIDKV.Value.GetInt()
if err != nil {
return err
}
existingDescKV, err := txn.Get(sqlbase.MakeDescMetadataKey(sqlbase.ID(existingID)))
if err != nil {
return err
}
if err := existingDescKV.Value.GetProto(&existingDesc); err != nil {
return err
}
}
// Write the new descriptors. First the ID -> TableDescriptor for the new
// table, then flip (or initialize) the name -> ID entry so any new queries
// will use the new one. If there was an existing table, it can now be
// cleaned up.
b := txn.NewBatch()
b.CPut(tableDescKey, sqlbase.WrapDescriptor(&table), nil)
if existingTable := existingDesc.GetTable(); existingTable == nil {
b.CPut(tableIDKey, table.ID, nil)
} else {
existingIDKV.Value.ClearChecksum()
b.CPut(tableIDKey, table.ID, existingIDKV.Value)
// TODO(dan): This doesn't work for interleaved tables. Fix it when we
// fix the empty range interleaved table TODO below.
existingDataPrefix := roachpb.Key(keys.MakeTablePrefix(uint32(existingTable.ID)))
b.DelRange(existingDataPrefix, existingDataPrefix.PrefixEnd(), false)
zoneKey, _, descKey := GetKeysForTableDescriptor(existingTable)
// Delete the desc and zone entries. Leave the name because the new
// table is using it.
b.Del(descKey)
b.Del(zoneKey)
}
return txn.Run(b)
}
Example 8: checkTableSize
// checkTableSize checks that the number of key:value pairs stored
// in the table equals e.
func (mt mutationTest) checkTableSize(e int) {
// Check that there are no hidden values
tablePrefix := keys.MakeTablePrefix(uint32(mt.tableDesc.ID))
tableStartKey := roachpb.Key(tablePrefix)
tableEndKey := tableStartKey.PrefixEnd()
if kvs, err := mt.kvDB.Scan(context.TODO(), tableStartKey, tableEndKey, 0); err != nil {
mt.Error(err)
} else if len(kvs) != e {
mt.Errorf("expected %d key value pairs, but got %d", e, len(kvs))
}
}
Example 9: MakeNameMetadataKey
// MakeNameMetadataKey returns the key for the name. Pass name == "" in order
// to generate the prefix key to use to scan over all of the names for the
// specified parentID.
func MakeNameMetadataKey(parentID ID, name string) roachpb.Key {
normName := parser.ReNormalizeName(name)
k := keys.MakeTablePrefix(uint32(NamespaceTable.ID))
k = encoding.EncodeUvarintAscending(k, uint64(NamespaceTable.PrimaryIndex.ID))
k = encoding.EncodeUvarintAscending(k, uint64(parentID))
if name != "" {
k = encoding.EncodeBytesAscending(k, []byte(normName))
k = keys.MakeFamilyKey(k, uint32(NamespaceTable.Columns[2].ID))
}
return k
}
Example 10: allSQLDescriptors
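// allSQLDescriptors scans the descriptor table's key span and unmarshals each
// row into a sqlbase.Descriptor.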
func allSQLDescriptors(txn *client.Txn) ([]sqlbase.Descriptor, error) {
startKey := roachpb.Key(keys.MakeTablePrefix(keys.DescriptorTableID))
endKey := startKey.PrefixEnd()
// TODO(dan): Iterate with some batch size.
rows, err := txn.Scan(startKey, endKey, 0)
if err != nil {
return nil, errors.Wrap(err, "unable to scan SQL descriptors")
}
sqlDescs := make([]sqlbase.Descriptor, len(rows))
for i, row := range rows {
if err := row.ValueProto(&sqlDescs[i]); err != nil {
return nil, errors.Wrapf(err, "%s: unable to unmarshal SQL descriptor", row.Key)
}
}
return sqlDescs, nil
}
Example 11: GetTableSpan
// GetTableSpan gets the key span for a SQL table, including any indices.
func (ie InternalExecutor) GetTableSpan(
user string, txn *client.Txn, dbName, tableName string,
) (roachpb.Span, error) {
// Lookup the table ID.
p := makeInternalPlanner("get-table-span", txn, user, ie.LeaseManager.memMetrics)
defer finishInternalPlanner(p)
p.leaseMgr = ie.LeaseManager
tn := parser.TableName{DatabaseName: parser.Name(dbName), TableName: parser.Name(tableName)}
tableID, err := getTableID(p, &tn)
if err != nil {
return roachpb.Span{}, err
}
// Determine table data span.
tablePrefix := keys.MakeTablePrefix(uint32(tableID))
tableStartKey := roachpb.Key(tablePrefix)
tableEndKey := tableStartKey.PrefixEnd()
return roachpb.Span{Key: tableStartKey, EndKey: tableEndKey}, nil
}
Example 12: MakeAllDescsMetadataKey
// MakeAllDescsMetadataKey returns the key for all descriptors.
func MakeAllDescsMetadataKey() roachpb.Key {
k := keys.MakeTablePrefix(uint32(DescriptorTable.ID))
return encoding.EncodeUvarintAscending(k, uint64(DescriptorTable.PrimaryIndex.ID))
}
Example 13: TestSplitOnTableBoundaries
// TestSplitOnTableBoundaries verifies that ranges get split
// as new tables get created.
func TestSplitOnTableBoundaries(t *testing.T) {
defer leaktest.AfterTest(t)()
params, _ := createTestServerParams()
// We want fast scan.
params.ScanInterval = time.Millisecond
params.ScanMaxIdleTime = time.Millisecond
s, sqlDB, kvDB := serverutils.StartServer(t, params)
defer s.Stopper().Stop()
expectedInitialRanges := server.ExpectedInitialRangeCount()
if _, err := sqlDB.Exec(`CREATE DATABASE test`); err != nil {
t.Fatal(err)
}
// We split up to the largest allocated descriptor ID, be it a table
// or a database.
util.SucceedsSoon(t, func() error {
num, err := getNumRanges(kvDB)
if err != nil {
return err
}
if e := expectedInitialRanges + 1; num != e {
return errors.Errorf("expected %d splits, found %d", e, num)
}
return nil
})
// Verify the actual splits.
objectID := uint32(keys.MaxReservedDescID + 1)
splits := []roachpb.RKey{keys.MakeTablePrefix(objectID), roachpb.RKeyMax}
ranges, err := getRangeKeys(kvDB)
if err != nil {
t.Fatal(err)
}
if a, e := ranges[expectedInitialRanges-1:], splits; !rangesMatchSplits(a, e) {
t.Fatalf("Found ranges: %v\nexpected: %v", a, e)
}
// Let's create a table.
if _, err := sqlDB.Exec(`CREATE TABLE test.test (k INT PRIMARY KEY, v INT)`); err != nil {
t.Fatal(err)
}
util.SucceedsSoon(t, func() error {
num, err := getNumRanges(kvDB)
if err != nil {
return err
}
if e := expectedInitialRanges + 2; num != e {
return errors.Errorf("expected %d splits, found %d", e, num)
}
return nil
})
// Verify the actual splits.
splits = []roachpb.RKey{keys.MakeTablePrefix(objectID), keys.MakeTablePrefix(objectID + 1), roachpb.RKeyMax}
ranges, err = getRangeKeys(kvDB)
if err != nil {
t.Fatal(err)
}
if a, e := ranges[expectedInitialRanges-1:], splits; !rangesMatchSplits(a, e) {
t.Fatalf("Found ranges: %v\nexpected: %v", a, e)
}
}
Example 14: TestSchemaChangeRetry
// Test that schema changes are retried and complete properly. This also checks
// that a mutation checkpoint reduces the number of chunks operated on during
// a retry.
func TestSchemaChangeRetry(t *testing.T) {
defer leaktest.AfterTest(t)()
params, _ := createTestServerParams()
attempts := 0
seenSpan := roachpb.Span{}
params.Knobs = base.TestingKnobs{
SQLSchemaChanger: &csql.SchemaChangerTestingKnobs{
RunBeforeBackfillChunk: func(sp roachpb.Span) error {
attempts++
// Fail somewhere in the middle.
if attempts == 3 {
return context.DeadlineExceeded
}
if seenSpan.Key != nil {
// Check that the keys are never reevaluated
if seenSpan.Key.Compare(sp.Key) >= 0 {
t.Errorf("reprocessing span %s, already seen span %s", sp, seenSpan)
}
if !seenSpan.EndKey.Equal(sp.EndKey) {
t.Errorf("different EndKey: span %s, already seen span %s", sp, seenSpan)
}
}
seenSpan = sp
return nil
},
// Disable asynchronous schema change execution to allow
// synchronous path to run schema changes.
AsyncExecNotification: asyncSchemaChangerDisabled,
WriteCheckpointInterval: time.Nanosecond,
},
}
s, sqlDB, kvDB := serverutils.StartServer(t, params)
defer s.Stopper().Stop()
if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k INT PRIMARY KEY, v INT);
`); err != nil {
t.Fatal(err)
}
// Bulk insert.
maxValue := 5000
if err := bulkInsertIntoTable(sqlDB, maxValue); err != nil {
t.Fatal(err)
}
// Add an index and check that it succeeds.
if _, err := sqlDB.Exec("CREATE UNIQUE INDEX foo ON t.test (v)"); err != nil {
t.Fatal(err)
}
// The schema change succeeded. Verify that the index foo over v is
// consistent.
rows, err := sqlDB.Query(`SELECT v from t.test@foo`)
if err != nil {
t.Fatal(err)
}
count := 0
for ; rows.Next(); count++ {
var val int
if err := rows.Scan(&val); err != nil {
t.Errorf("row %d scan failed: %s", count, err)
continue
}
if count != val {
t.Errorf("e = %d, v = %d", count, val)
}
}
if err := rows.Err(); err != nil {
t.Fatal(err)
}
if eCount := maxValue + 1; eCount != count {
t.Fatalf("read the wrong number of rows: e = %d, v = %d", eCount, count)
}
tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))
tableEnd := tablePrefix.PrefixEnd()
numKeysPerRow := 2
if kvs, err := kvDB.Scan(context.TODO(), tablePrefix, tableEnd, 0); err != nil {
t.Fatal(err)
} else if e := numKeysPerRow * (maxValue + 1); len(kvs) != e {
t.Fatalf("expected %d key value pairs, but got %d", e, len(kvs))
}
// Add a column and check that it works.
attempts = 0
seenSpan = roachpb.Span{}
if _, err := sqlDB.Exec("ALTER TABLE t.test ADD COLUMN x DECIMAL DEFAULT (DECIMAL '1.4')"); err != nil {
t.Fatal(err)
}
rows, err = sqlDB.Query(`SELECT x from t.test`)
if err != nil {
t.Fatal(err)
}
//.........some code omitted here.........
Example 15: TestAbortSchemaChangeBackfill
//.........some code omitted here.........
// The two drop cases (column and index) do not need to be tested here
// because the INSERT down below will not insert an entry for a dropped
// column or index; however, it's still nice to have them just in case
// INSERT gets messed up.
testCases := []struct {
sql string
// Each schema change adds/drops a schema element that affects the
// number of keys representing a table row.
expectedNumKeysPerRow int
}{
{"ALTER TABLE t.test ADD COLUMN x DECIMAL DEFAULT (DECIMAL '1.4')", 2},
{"ALTER TABLE t.test DROP x", 1},
{"CREATE UNIQUE INDEX foo ON t.test (v)", 2},
{"DROP INDEX [email protected]", 1},
}
for i, testCase := range testCases {
t.Run(testCase.sql, func(t *testing.T) {
// Delete two rows so that the table size is smaller than a backfill
// chunk. The two values will be added later to make the table larger
// than a backfill chunk after the schema change backfill is aborted.
for i := 0; i < 2; i++ {
if _, err := sqlDB.Exec(`DELETE FROM t.test WHERE k = $1`, i); err != nil {
t.Fatal(err)
}
}
backfillNotification = make(chan struct{})
commandsDone = make(chan struct{})
atomic.StoreUint32(&dontAbortBackfill, 0)
// Run the column schema change in a separate goroutine.
var wg sync.WaitGroup
wg.Add(1)
go func() {
// Start schema change that eventually runs a backfill.
if _, err := sqlDB.Exec(testCase.sql); err != nil {
t.Error(err)
}
wg.Done()
}()
// Wait until the schema change backfill has finished writing its
// intents.
<-backfillNotification
// Delete a row that will push the backfill transaction.
if _, err := sqlDB.Exec(`
BEGIN TRANSACTION PRIORITY HIGH;
DELETE FROM t.test WHERE k = 2;
COMMIT;
`); err != nil {
t.Fatal(err)
}
// Add missing rows so that the table exceeds the size of a
// backfill chunk.
for i := 0; i < 3; i++ {
if _, err := sqlDB.Exec(`INSERT INTO t.test VALUES($1, $2)`, i, i); err != nil {
t.Fatal(err)
}
}
// Release backfill so that it can try to commit and in the
// process discover that it was aborted.
close(commandsDone)
wg.Wait() // for schema change to complete
// Backfill retry happened.
if count, e := atomic.SwapInt64(&retriedBackfill, 0), int64(1); count != e {
t.Fatalf("expected = %d, found = %d", e, count)
}
// 1 failed + 2 retried backfill chunks.
expectNumBackfills := int64(3)
if i == len(testCases)-1 {
// The DROP INDEX case: The above INSERTs do not add any index
// entries for the inserted rows, so the index remains smaller
// than a backfill chunk and is dropped in a single retried
// backfill chunk.
expectNumBackfills = 2
}
if count := atomic.SwapInt64(&backfillCount, 0); count != expectNumBackfills {
t.Fatalf("expected = %d, found = %d", expectNumBackfills, count)
}
// Verify the number of keys left behind in the table to validate
// schema change operations.
tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))
tableEnd := tablePrefix.PrefixEnd()
if kvs, err := kvDB.Scan(context.TODO(), tablePrefix, tableEnd, 0); err != nil {
t.Fatal(err)
} else if e := testCase.expectedNumKeysPerRow * (maxValue + 1); len(kvs) != e {
t.Fatalf("expected %d key value pairs, but got %d", e, len(kvs))
}
})
}
}