This page collects and summarizes typical usage examples of the Golang function GetInitialSystemValues from github.com/cockroachdb/cockroach/sql. If you are wondering what GetInitialSystemValues does and how to call it, the curated examples below should help.
Eleven code examples of the GetInitialSystemValues function are shown below, ordered by popularity.
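Before diving into the examples, here is a minimal sketch of what a direct call looks like. The import path is the one named above; the printing loop is purely illustrative:

package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/sql"
)

func main() {
	// GetInitialSystemValues returns the KeyValue pairs that seed the
	// system tables when the first range is bootstrapped.
	for _, kv := range sql.GetInitialSystemValues() {
		fmt.Printf("%s\n", kv.Key)
	}
}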
Example 1: createTestStoreWithEngine
// createTestStoreWithEngine creates a test store using the given engine and clock.
// The caller is responsible for closing the store on exit.
func createTestStoreWithEngine(t *testing.T, eng engine.Engine, clock *hlc.Clock,
	bootstrap bool, sCtx *storage.StoreContext) (*storage.Store, *stop.Stopper) {
	stopper := stop.NewStopper()
	rpcContext := rpc.NewContext(&base.Context{}, clock, stopper)
	if sCtx == nil {
		// Make a copy.
		ctx := storage.TestStoreContext
		sCtx = &ctx
	}
	nodeDesc := &proto.NodeDescriptor{NodeID: 1}
	sCtx.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	localSender := kv.NewLocalSender()
	rpcSend := func(_ rpc.Options, _ string, _ []net.Addr,
		getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message,
		_ *rpc.Context) ([]gogoproto.Message, error) {
		call := proto.Call{
			Args:  getArgs(nil /* net.Addr */).(proto.Request),
			Reply: getReply().(proto.Response),
		}
		localSender.Send(context.Background(), call)
		return []gogoproto.Message{call.Reply}, call.Reply.Header().GoError()
	}
	// Mostly makes sure that we don't see a warning per request.
	{
		if err := sCtx.Gossip.AddInfoProto(gossip.MakeNodeIDKey(nodeDesc.NodeID), nodeDesc, time.Hour); err != nil {
			t.Fatal(err)
		}
		if err := sCtx.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
			t.Fatal(err)
		}
	}
	distSender := kv.NewDistSender(&kv.DistSenderContext{
		Clock:             clock,
		RPCSend:           rpcSend,     // defined above
		RangeDescriptorDB: localSender, // for descriptor lookup
	}, sCtx.Gossip)
	sender := kv.NewTxnCoordSender(distSender, clock, false, nil, stopper)
	sCtx.Clock = clock
	sCtx.DB = client.NewDB(sender)
	sCtx.Transport = multiraft.NewLocalRPCTransport(stopper)
	// TODO(bdarnell): arrange to have the transport closed.
	store := storage.NewStore(*sCtx, eng, nodeDesc)
	if bootstrap {
		if err := store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, stopper); err != nil {
			t.Fatal(err)
		}
	}
	localSender.AddStore(store)
	if bootstrap {
		if err := store.BootstrapRange(sql.GetInitialSystemValues()); err != nil {
			t.Fatal(err)
		}
	}
	if err := store.Start(stopper); err != nil {
		t.Fatal(err)
	}
	return store, stopper
}
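For context, here is a hypothetical caller of the helper above. The test name and engine size are illustrative; passing nil for the store context selects the default, as the helper shows:

func TestStoreWithInMemEngine(t *testing.T) {
	defer leaktest.AfterTest(t)
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)
	clock := hlc.NewClock(hlc.UnixNano)
	// bootstrap=true writes the first range, seeded with
	// sql.GetInitialSystemValues(); nil picks the default StoreContext.
	store, stopper := createTestStoreWithEngine(t, eng, clock, true, nil)
	defer stopper.Stop()
	_ = store // exercise the store here
}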
Example 2: BootstrapCluster
// BootstrapCluster bootstraps multiple stores using the provided engines and
// cluster ID. The first bootstrapped store contains a single range spanning
// all keys. Initial range lookup metadata is populated for the range.
//
// Returns a KV client for unittest purposes. Caller should close the returned
// client.
func BootstrapCluster(clusterID string, engines []engine.Engine, stopper *stop.Stopper) (*client.DB, error) {
	ctx := storage.StoreContext{}
	ctx.ScanInterval = 10 * time.Minute
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	// Create a KV DB with a local sender.
	lSender := kv.NewLocalSender()
	sender := kv.NewTxnCoordSender(lSender, ctx.Clock, false, nil, stopper)
	ctx.DB = client.NewDB(sender)
	ctx.Transport = multiraft.NewLocalRPCTransport(stopper)
	for i, eng := range engines {
		sIdent := roachpb.StoreIdent{
			ClusterID: clusterID,
			NodeID:    1,
			StoreID:   roachpb.StoreID(i + 1),
		}
		// The bootstrapping store will not connect to other nodes so its
		// StoreConfig doesn't really matter.
		s := storage.NewStore(ctx, eng, &roachpb.NodeDescriptor{NodeID: 1})
		// Verify the store isn't already part of a cluster.
		if len(s.Ident.ClusterID) > 0 {
			return nil, util.Errorf("storage engine already belongs to a cluster (%s)", s.Ident.ClusterID)
		}
		// Bootstrap store to persist the store ident.
		if err := s.Bootstrap(sIdent, stopper); err != nil {
			return nil, err
		}
		// Create first range, writing directly to engine. Note this does
		// not create the range, just its data. Only do this if this is the
		// first store.
		if i == 0 {
			// TODO(marc): this is better than having storage/ import sql, but still
			// not great. Find a better place to keep those.
			initialValues := sql.GetInitialSystemValues()
			if err := s.BootstrapRange(initialValues); err != nil {
				return nil, err
			}
		}
		if err := s.Start(stopper); err != nil {
			return nil, err
		}
		lSender.AddStore(s)
		// Initialize node and store ids. Only initialize the node once.
		if i == 0 {
			if nodeID, err := allocateNodeID(ctx.DB); nodeID != sIdent.NodeID || err != nil {
				return nil, util.Errorf("expected to initialize node id allocator to %d, got %d: %s",
					sIdent.NodeID, nodeID, err)
			}
		}
		if storeID, err := allocateStoreIDs(sIdent.NodeID, 1, ctx.DB); storeID != sIdent.StoreID || err != nil {
			return nil, util.Errorf("expected to initialize store id allocator to %d, got %d: %s",
				sIdent.StoreID, storeID, err)
		}
	}
	return ctx.DB, nil
}
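A minimal caller sketch for the function above; the cluster ID and engine size are illustrative, and the attributes type follows this example's roachpb vintage (the older Example 6 below uses proto instead). Note that stopping the stopper is what releases the returned client:

stopper := stop.NewStopper()
defer stopper.Stop()
e := engine.NewInMem(roachpb.Attributes{}, 1<<20)
db, err := BootstrapCluster("test-cluster", []engine.Engine{e}, stopper)
if err != nil {
	panic(err) // in a test, prefer t.Fatal(err)
}
_ = db // issue KV requests through db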
Example 3: TestSingleRangeReverseScan
// TestSingleRangeReverseScan verifies that ReverseScan gets the right results
// on a single range.
func TestSingleRangeReverseScan(t *testing.T) {
	defer leaktest.AfterTest(t)
	s, db := initReverseScanTestEnv(t)
	defer s.Stop()
	// Case 1: Request.EndKey is in the middle of the range.
	if rows, err := db.ReverseScan("b", "d", 0); err != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", err)
	} else if l := len(rows); l != 2 {
		t.Errorf("expected 2 rows; got %d", l)
	}
	// Case 2: Request.EndKey is equal to the EndKey of the range.
	if rows, err := db.ReverseScan("e", "g", 0); err != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", err)
	} else if l := len(rows); l != 2 {
		t.Errorf("expected 2 rows; got %d", l)
	}
	// Case 3: Test roachpb.KeyMax.
	// This span covers the system DB keys: one user row ("g") plus the
	// initial system KV pairs.
	wanted := 1 + len(sql.GetInitialSystemValues())
	if rows, err := db.ReverseScan("g", roachpb.KeyMax, 0); err != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", err)
	} else if l := len(rows); l != wanted {
		t.Errorf("expected %d rows; got %d", wanted, l)
	}
	// Case 4: Test keys.SystemMax.
	if rows, err := db.ReverseScan(keys.SystemMax, "b", 0); err != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", err)
	} else if l := len(rows); l != 1 {
		t.Errorf("expected 1 row; got %d", l)
	}
}
Example 4: addStore
// addStore creates a new store on the same Transport but doesn't create any ranges.
func (m *multiTestContext) addStore() {
	idx := len(m.stores)
	var clock *hlc.Clock
	if len(m.clocks) > idx {
		clock = m.clocks[idx]
	} else {
		clock = m.clock
		m.clocks = append(m.clocks, clock)
	}
	var eng engine.Engine
	var needBootstrap bool
	if len(m.engines) > idx {
		eng = m.engines[idx]
	} else {
		eng = engine.NewInMem(proto.Attributes{}, 1<<20)
		m.engines = append(m.engines, eng)
		needBootstrap = true
		// Add an extra refcount to the engine so the underlying rocksdb instances
		// aren't closed when stopping and restarting the stores.
		// These refcounts are removed in Stop().
		if err := eng.Open(); err != nil {
			m.t.Fatal(err)
		}
	}
	stopper := stop.NewStopper()
	ctx := m.makeContext(idx)
	store := storage.NewStore(ctx, eng, &proto.NodeDescriptor{NodeID: proto.NodeID(idx + 1)})
	if needBootstrap {
		err := store.Bootstrap(proto.StoreIdent{
			NodeID:  proto.NodeID(idx + 1),
			StoreID: proto.StoreID(idx + 1),
		}, stopper)
		if err != nil {
			m.t.Fatal(err)
		}
		// Bootstrap the initial range on the first store.
		if idx == 0 {
			if err := store.BootstrapRange(sql.GetInitialSystemValues()); err != nil {
				m.t.Fatal(err)
			}
		}
	}
	if err := store.Start(stopper); err != nil {
		m.t.Fatal(err)
	}
	store.WaitForInit()
	m.stores = append(m.stores, store)
	if len(m.senders) == idx {
		m.senders = append(m.senders, kv.NewLocalSender())
	}
	m.senders[idx].AddStore(store)
	// Save the store identities for later so we can use them in
	// replication operations even while the store is stopped.
	m.idents = append(m.idents, store.Ident)
	m.stoppers = append(m.stoppers, stopper)
}
Example 5: TestInitialKeys
func TestInitialKeys(t *testing.T) {
	defer leaktest.AfterTest(t)
	kv := sql.GetInitialSystemValues()
	// IDGenerator + 2 for each table/database.
	if actual, expected := len(kv), 1+2*sql.NumUsedSystemIDs; actual != expected {
		t.Fatalf("Wrong number of initial sql kv pairs: %d, wanted %d", actual, expected)
	}
}
Example 6: TestBootstrapCluster
// TestBootstrapCluster verifies the results of bootstrapping a
// cluster. Uses an in-memory engine.
func TestBootstrapCluster(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	e := engine.NewInMem(proto.Attributes{}, 1<<20)
	localDB, err := BootstrapCluster("cluster-1", []engine.Engine{e}, stopper)
	if err != nil {
		t.Fatal(err)
	}
	defer stopper.Stop()
	// Scan the complete contents of the local database.
	rows, err := localDB.Scan(keys.LocalPrefix.PrefixEnd(), proto.KeyMax, 0)
	if err != nil {
		t.Fatal(err)
	}
	var foundKeys proto.KeySlice
	for _, kv := range rows {
		foundKeys = append(foundKeys, kv.Key)
	}
	var expectedKeys = proto.KeySlice{
		proto.MakeKey(proto.Key("\x00\x00meta1"), proto.KeyMax),
		proto.MakeKey(proto.Key("\x00\x00meta2"), proto.KeyMax),
		proto.Key("\x00node-idgen"),
		proto.Key("\x00range-tree-root"),
		proto.Key("\x00store-idgen"),
		proto.Key("\x00zone"),
	}
	// Add the initial keys for sql.
	for _, kv := range sql.GetInitialSystemValues() {
		expectedKeys = append(expectedKeys, kv.Key)
	}
	// Re-sort the list; the sql values are not sorted.
	sort.Sort(expectedKeys)
	if !reflect.DeepEqual(foundKeys, expectedKeys) {
		t.Errorf("expected keys mismatch:\n%s\n -- vs. -- \n\n%s",
			formatKeys(foundKeys), formatKeys(expectedKeys))
	}
	// TODO(spencer): check values.
}
Example 7: TestStoreRangeSystemSplits
// TestStoreRangeSystemSplits verifies that splits are based on the
// contents of the SystemDB span.
func TestStoreRangeSystemSplits(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()
	// Write the initial sql values to the system DB as well
	// as the equivalent of table descriptors for X user tables.
	// This does two things:
	// - descriptor IDs are used to determine split keys
	// - the write triggers a SystemConfig update and gossip.
	// We should end up with splits at each user table prefix.
	if err := store.DB().Txn(func(txn *client.Txn) error {
		txn.SetSystemDBTrigger()
		for _, kv := range sql.GetInitialSystemValues() {
			if err := txn.Put(kv.Key, kv.Value.GetRawBytes()); err != nil {
				return err
			}
		}
		for i := 1; i <= 5; i++ {
			// We don't care about the values, just the keys.
			k := sql.MakeDescMetadataKey(sql.ID(keys.MaxReservedDescID + i))
			v, err := txn.Get(k)
			if err != nil {
				return err
			}
			if err := txn.Put(k, v.ValueBytes()); err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
	expKeys := []roachpb.Key{}
	for i := 1; i <= 5; i++ {
		expKeys = append(expKeys, keys.MakeKey(keys.Meta2Prefix,
			keys.MakeTablePrefix(uint32(keys.MaxReservedDescID+i))))
	}
	expKeys = append(expKeys, keys.MakeKey(keys.Meta2Prefix, roachpb.RKeyMax))
	if err := util.IsTrueWithin(func() bool {
		rows, err := store.DB().Scan(keys.Meta2Prefix, keys.MetaMax, 0)
		if err != nil {
			t.Fatalf("failed to scan meta2 keys: %s", err)
		}
		var keys []roachpb.Key
		for _, r := range rows {
			keys = append(keys, r.Key)
		}
		return reflect.DeepEqual(keys, expKeys)
	}, 5*time.Second); err != nil {
		t.Errorf("expected splits not found: %s", err)
	}
	// Write more descriptors for user tables.
	if err := store.DB().Txn(func(txn *client.Txn) error {
		txn.SetSystemDBTrigger()
		// This time, only write the last table descriptor. Splits
		// still occur for every ID.
		// We don't care about the values, just the keys.
		k := sql.MakeDescMetadataKey(sql.ID(keys.MaxReservedDescID + 10))
		v, err := txn.Get(k)
		if err != nil {
			return err
		}
		return txn.Put(k, v.ValueBytes())
	}); err != nil {
		t.Fatal(err)
	}
	expKeys = []roachpb.Key{}
	for i := 1; i <= 10; i++ {
		expKeys = append(expKeys, keys.MakeKey(keys.Meta2Prefix,
			keys.MakeTablePrefix(uint32(keys.MaxReservedDescID+i))))
	}
	expKeys = append(expKeys, keys.MakeKey(keys.Meta2Prefix, roachpb.RKeyMax))
	if err := util.IsTrueWithin(func() bool {
		rows, err := store.DB().Scan(keys.Meta2Prefix, keys.MetaMax, 0)
		if err != nil {
			t.Fatalf("failed to scan meta2 keys: %s", err)
		}
		var keys []roachpb.Key
		for _, r := range rows {
			keys = append(keys, r.Key)
		}
		return reflect.DeepEqual(keys, expKeys)
	}, 5*time.Second); err != nil {
		t.Errorf("expected splits not found: %s", err)
	}
}
Example 8: TestStoreRangeSystemSplits
// TestStoreRangeSystemSplits verifies that splits are based on the
// contents of the SystemDB span.
func TestStoreRangeSystemSplits(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()
	initialSystemValues := sql.GetInitialSystemValues()
	numInitialValues := len(initialSystemValues)
	// Write the initial sql values to the system DB as well
	// as the equivalent of table descriptors for X user tables.
	// This does two things:
	// - descriptor IDs are used to determine split keys
	// - the write triggers a SystemConfig update and gossip.
	// We should end up with splits at each user table prefix.
	if err := store.DB().Txn(func(txn *client.Txn) error {
		txn.SetSystemDBTrigger()
		for i, kv := range initialSystemValues {
			bytes, err := kv.Value.GetBytes()
			if err != nil {
				log.Info(err)
				continue
			}
			if err := txn.Put(kv.Key, bytes); err != nil {
				return err
			}
			descID := keys.MaxReservedDescID + i + 1
			// We don't care about the values, just the keys.
			k := sql.MakeDescMetadataKey(sql.ID(descID))
			if err := txn.Put(k, bytes); err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
	verifySplitsAtTablePrefixes := func(maxTableID int) {
		// We expect splits at each of the user tables, but not at the system
		// table boundaries.
		expKeys := make([]roachpb.Key, 0, maxTableID+1)
		for i := 1; i <= maxTableID; i++ {
			expKeys = append(expKeys,
				keys.MakeKey(keys.Meta2Prefix, keys.MakeTablePrefix(keys.MaxReservedDescID+uint32(i))),
			)
		}
		expKeys = append(expKeys, keys.MakeKey(keys.Meta2Prefix, roachpb.RKeyMax))
		util.SucceedsWithinDepth(1, t, 5*time.Second, func() error {
			rows, err := store.DB().Scan(keys.Meta2Prefix, keys.MetaMax, 0)
			if err != nil {
				return err
			}
			keys := make([]roachpb.Key, 0, len(expKeys))
			for _, r := range rows {
				keys = append(keys, r.Key)
			}
			if !reflect.DeepEqual(keys, expKeys) {
				return util.Errorf("expected split keys:\n%v\nbut found:\n%v", expKeys, keys)
			}
			return nil
		})
	}
	verifySplitsAtTablePrefixes(len(initialSystemValues))
	numTotalValues := numInitialValues + 5
	// Write another, disjoint descriptor for a user table.
	if err := store.DB().Txn(func(txn *client.Txn) error {
		txn.SetSystemDBTrigger()
		// This time, only write the last table descriptor. Splits
		// still occur for every intervening ID.
		// We don't care about the values, just the keys.
		k := sql.MakeDescMetadataKey(sql.ID(keys.MaxReservedDescID + numTotalValues))
		return txn.Put(k, &sql.TableDescriptor{})
	}); err != nil {
		t.Fatal(err)
	}
	verifySplitsAtTablePrefixes(numTotalValues)
}
Example 9: TestGetZoneConfig
// TestGetZoneConfig exercises config.GetZoneConfig and the sql hook for it.
func TestGetZoneConfig(t *testing.T) {
	defer leaktest.AfterTest(t)
	// Disable splitting. We're using bad attributes in zone configs
	// to be able to match.
	config.TestingDisableTableSplits = true
	defer func() { config.TestingDisableTableSplits = false }()
	s, sqlDB, _ := setup(t)
	defer cleanup(s, sqlDB)
	expectedLength := len(sql.GetInitialSystemValues()) - 1 /* ignore ID generator */
	expectedCounter := uint32(keys.MaxReservedDescID + 1)
	// Naming scheme for database and tables:
	// db1 has tables tb11 and tb12
	// db2 has tables tb21 and tb22
	db1 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE DATABASE db1`); err != nil {
		t.Fatal(err)
	}
	expectedCounter++
	db2 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE DATABASE db2`); err != nil {
		t.Fatal(err)
	}
	expectedCounter++
	tb11 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db1.tb1 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}
	expectedCounter++
	tb12 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db1.tb2 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}
	expectedCounter++
	tb21 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db2.tb1 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}
	expectedCounter++
	tb22 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db2.tb2 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}
	expectedCounter++
	// We've created 2 databases at 2 keys each, and 4 tables at 2 keys each.
	expectedLength += 2*2 + 4*2
	cfg, err := getLatestConfig(s, expectedLength)
	if err != nil {
		t.Fatalf("failed to get latest system config: %s", err)
	}
	// We have no custom zone configs.
	testCases := []struct {
		key     proto.Key
		zoneCfg config.ZoneConfig
	}{
		{proto.KeyMin, *config.DefaultZoneConfig},
		{keys.TableDataPrefix, *config.DefaultZoneConfig},
		{keys.MakeTablePrefix(1), *config.DefaultZoneConfig},
		{keys.MakeTablePrefix(keys.MaxReservedDescID), *config.DefaultZoneConfig},
		{keys.MakeTablePrefix(db1), *config.DefaultZoneConfig},
		{keys.MakeTablePrefix(db2), *config.DefaultZoneConfig},
		{keys.MakeTablePrefix(tb11), *config.DefaultZoneConfig},
		{keys.MakeTablePrefix(tb12), *config.DefaultZoneConfig},
		{keys.MakeTablePrefix(tb21), *config.DefaultZoneConfig},
		{keys.MakeTablePrefix(tb22), *config.DefaultZoneConfig},
	}
	for tcNum, tc := range testCases {
		zoneCfg, err := cfg.GetZoneConfigForKey(tc.key)
		if err != nil {
			t.Fatalf("#%d: err=%s", tcNum, err)
		}
		if !reflect.DeepEqual(*zoneCfg, tc.zoneCfg) {
			t.Errorf("#%d: bad zone config.\nexpected: %+v\ngot: %+v", tcNum, tc.zoneCfg, zoneCfg)
		}
	}
	// Now set some zone configs. We don't have a nice way of using table
	// names for this, so we do raw puts.
	// Here is the list of dbs/tables and whether they have a custom zone config:
	// db1: true
	//   tb1: true
	//   tb2: false
	// db2: false
	//   tb1: true
	//   tb2: false
	db1Cfg := config.ZoneConfig{ReplicaAttrs: []proto.Attributes{{Attrs: []string{"db1"}}}}
	tb11Cfg := config.ZoneConfig{ReplicaAttrs: []proto.Attributes{{Attrs: []string{"db1.tb1"}}}}
	// ... (the remainder of this example is omitted) ...
Example 10: TestComputeSplits
func TestComputeSplits(t *testing.T) {
	defer leaktest.AfterTest(t)
	start := uint32(keys.MaxReservedDescID + 1)
	// Real SQL system tables only.
	baseSql := sql.GetInitialSystemValues()
	// Real SQL system tables plus some user stuff.
	userSql := append(sql.GetInitialSystemValues(),
		descriptor(start), descriptor(start+1), descriptor(start+5))
	allSplits := []uint32{start, start + 1, start + 2, start + 3, start + 4, start + 5}
	testCases := []struct {
		values     []proto.KeyValue
		start, end proto.Key
		// Use ints in the testcase definitions, more readable.
		splits []uint32
	}{
		// No data.
		{nil, proto.KeyMin, proto.KeyMax, nil},
		{nil, keys.MakeTablePrefix(start), proto.KeyMax, nil},
		{nil, keys.MakeTablePrefix(start), keys.MakeTablePrefix(start + 10), nil},
		{nil, proto.KeyMin, keys.MakeTablePrefix(start + 10), nil},
		// No user data.
		{baseSql, proto.KeyMin, proto.KeyMax, nil},
		{baseSql, keys.MakeTablePrefix(start), proto.KeyMax, nil},
		{baseSql, keys.MakeTablePrefix(start), keys.MakeTablePrefix(start + 10), nil},
		{baseSql, proto.KeyMin, keys.MakeTablePrefix(start + 10), nil},
		// User descriptors.
		{userSql, proto.KeyMin, proto.KeyMax, allSplits},
		{userSql, keys.MakeTablePrefix(start), proto.KeyMax, allSplits[1:]},
		{userSql, keys.MakeTablePrefix(start), keys.MakeTablePrefix(start + 10), allSplits[1:]},
		{userSql, proto.KeyMin, keys.MakeTablePrefix(start + 10), allSplits},
		{userSql, keys.MakeTablePrefix(start + 4), keys.MakeTablePrefix(start + 10), allSplits[5:]},
		{userSql, keys.MakeTablePrefix(start + 5), keys.MakeTablePrefix(start + 10), nil},
		{userSql, keys.MakeTablePrefix(start + 6), keys.MakeTablePrefix(start + 10), nil},
		{userSql, keys.MakeKey(keys.MakeTablePrefix(start), proto.Key("foo")),
			keys.MakeTablePrefix(start + 10), allSplits[1:]},
		{userSql, keys.MakeKey(keys.MakeTablePrefix(start), proto.Key("foo")),
			keys.MakeTablePrefix(start + 5), allSplits[1:5]},
		{userSql, keys.MakeKey(keys.MakeTablePrefix(start), proto.Key("foo")),
			keys.MakeKey(keys.MakeTablePrefix(start+5), proto.Key("bar")), allSplits[1:]},
		{userSql, keys.MakeKey(keys.MakeTablePrefix(start), proto.Key("foo")),
			keys.MakeKey(keys.MakeTablePrefix(start), proto.Key("morefoo")), nil},
	}
	cfg := config.SystemConfig{}
	for tcNum, tc := range testCases {
		cfg.Values = tc.values
		splits := cfg.ComputeSplitKeys(tc.start, tc.end)
		if len(splits) == 0 && len(tc.splits) == 0 {
			continue
		}
		// Convert ints to actual keys.
		expected := []proto.Key{}
		if tc.splits != nil {
			for _, s := range tc.splits {
				expected = append(expected, keys.MakeTablePrefix(s))
			}
		}
		if !reflect.DeepEqual(splits, expected) {
			t.Errorf("#%d: bad splits:\ngot: %v\nexpected: %v", tcNum, splits, expected)
		}
	}
}
Example 11: TestGetLargestID
func TestGetLargestID(t *testing.T) {
	defer leaktest.AfterTest(t)
	testCases := []struct {
		values  []proto.KeyValue
		largest uint32
		errStr  string
	}{
		// No data.
		{nil, 0, "empty system values"},
		// Some data, but not from the system span.
		{[]proto.KeyValue{plainKV("a", "b")}, 0, "descriptor table not found"},
		// Some real data, but no descriptors.
		{[]proto.KeyValue{
			sqlKV(keys.NamespaceTableID, 1, 1),
			sqlKV(keys.NamespaceTableID, 1, 2),
			sqlKV(keys.UsersTableID, 1, 3),
		}, 0, "descriptor table not found"},
		// Single correct descriptor entry.
		{[]proto.KeyValue{sqlKV(keys.DescriptorTableID, 1, 1)}, 1, ""},
		// Surrounded by other data.
		{[]proto.KeyValue{
			sqlKV(keys.NamespaceTableID, 1, 20),
			sqlKV(keys.NamespaceTableID, 1, 30),
			sqlKV(keys.DescriptorTableID, 1, 8),
			sqlKV(keys.ZonesTableID, 1, 40),
		}, 8, ""},
		// Descriptors with holes. Index ID does not matter.
		{[]proto.KeyValue{
			sqlKV(keys.DescriptorTableID, 1, 1),
			sqlKV(keys.DescriptorTableID, 2, 5),
			sqlKV(keys.DescriptorTableID, 3, 8),
			sqlKV(keys.DescriptorTableID, 4, 12),
		}, 12, ""},
		// Real SQL layout.
		{sql.GetInitialSystemValues(), keys.ZonesTableID, ""},
	}
	cfg := config.SystemConfig{}
	for tcNum, tc := range testCases {
		cfg.Values = tc.values
		ret, err := cfg.GetLargestObjectID()
		if tc.errStr == "" {
			if err != nil {
				t.Errorf("#%d: error: %v", tcNum, err)
				continue
			}
		} else if !testutils.IsError(err, tc.errStr) {
			t.Errorf("#%d: expected err=%s, got %v", tcNum, tc.errStr, err)
			continue
		}
		if ret != tc.largest {
			t.Errorf("#%d: expected largest=%d, got %d", tcNum, tc.largest, ret)
		}
	}
}
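The helpers plainKV, sqlKV, and descriptor used in Examples 10 and 11 are not shown on this page. Plausible sketches follow; the exact key encoding (varints appended via util/encoding) is an assumption about this code base, and the tests only require keys that land in the right table spans:

// plainKV builds a KeyValue from raw strings, outside any table span.
func plainKV(k, v string) proto.KeyValue {
	return proto.KeyValue{
		Key:   proto.Key(k),
		Value: proto.Value{Bytes: []byte(v)},
	}
}

// sqlKV builds a KeyValue inside the given system table's span, with the
// object ID as the trailing varint; the value is left empty.
func sqlKV(tableID uint32, indexID, descID uint64) proto.KeyValue {
	k := keys.MakeTablePrefix(tableID)
	k = encoding.EncodeUvarint(k, indexID)
	k = encoding.EncodeUvarint(k, descID)
	return proto.KeyValue{Key: proto.Key(k)}
}

// descriptor builds a descriptor-table entry for a user table ID, which
// is all ComputeSplitKeys needs to derive a split point.
func descriptor(descID uint32) proto.KeyValue {
	return sqlKV(keys.DescriptorTableID, 1, uint64(descID))
}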