This article collects typical usage examples of the DefaultZoneConfig function from the Go package github.com/cockroachdb/cockroach/pkg/config. If you are wondering what DefaultZoneConfig does and how to use it, the curated examples below should help.
Twelve code examples of the DefaultZoneConfig function are shown, ordered by popularity by default.
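Most of the examples share one pattern: call config.DefaultZoneConfig() to obtain a copy of the cluster-wide default zone config, adjust fields such as RangeMaxBytes, RangeMinBytes, or NumReplicas, and then either install the result as the test default via config.TestingSetDefaultZoneConfig (Examples 4, 6, and 7) or marshal it and write it into the system.zones table (Examples 3, 8, and 10). Below is a minimal sketch of the first variant; the helper name setSmallRanges is hypothetical, while the config package calls are taken directly from the examples that follow.

package zoneexample

import "github.com/cockroachdb/cockroach/pkg/config"

// setSmallRanges is a hypothetical helper illustrating the override pattern
// from Examples 4 and 6: shrink the default range size so splits happen
// quickly in tests. The function returned by TestingSetDefaultZoneConfig
// restores the previous default when called.
func setSmallRanges(maxBytes int64) func() {
	cfg := config.DefaultZoneConfig() // a copy of the cluster-wide default
	cfg.RangeMaxBytes = maxBytes
	cfg.RangeMinBytes = maxBytes / 2
	return config.TestingSetDefaultZoneConfig(cfg)
}

A test would typically call defer setSmallRanges(1 << 18)() near the top, mirroring Example 4.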
Example 1: TestZoneConfigValidate
func TestZoneConfigValidate(t *testing.T) {
	defer leaktest.AfterTest(t)()

	testCases := []struct {
		cfg      config.ZoneConfig
		expected string
	}{
		{
			config.ZoneConfig{},
			"attributes for at least one replica must be specified in zone config",
		},
		{
			config.ZoneConfig{
				NumReplicas: 2,
			},
			"at least 3 replicas are required for multi-replica configurations",
		},
		{
			config.ZoneConfig{
				NumReplicas: 1,
			},
			"RangeMaxBytes 0 less than minimum allowed",
		},
		{
			config.ZoneConfig{
				NumReplicas:   1,
				RangeMaxBytes: config.DefaultZoneConfig().RangeMaxBytes,
			},
			"",
		},
		{
			config.ZoneConfig{
				NumReplicas:   1,
				RangeMinBytes: config.DefaultZoneConfig().RangeMaxBytes,
				RangeMaxBytes: config.DefaultZoneConfig().RangeMaxBytes,
			},
			"is greater than or equal to RangeMaxBytes",
		},
	}
	for i, c := range testCases {
		err := c.cfg.Validate()
		if c.expected == "" {
			if err != nil {
				t.Fatalf("%d: expected success, but got %v", i, err)
			}
		} else if !testutils.IsError(err, c.expected) {
			t.Fatalf("%d: expected %s, but got %v", i, c.expected, err)
		}
	}
}
Example 2: newRange
// newRange returns a new range with the given rangeID.
func newRange(rangeID roachpb.RangeID, allocator storage.Allocator) *Range {
	return &Range{
		desc: roachpb.RangeDescriptor{
			RangeID: rangeID,
		},
		zone:      config.DefaultZoneConfig(),
		replicas:  make(map[roachpb.StoreID]replica),
		allocator: allocator,
	}
}
Example 3: setup
func (t *parallelTest) setup(spec *parTestSpec) {
	if spec.ClusterSize == 0 {
		spec.ClusterSize = 1
	}

	if testing.Verbose() || log.V(1) {
		log.Infof(t.ctx, "Cluster Size: %d", spec.ClusterSize)
	}

	args := base.TestClusterArgs{
		ServerArgs: base.TestServerArgs{
			Knobs: base.TestingKnobs{
				SQLExecutor: &sql.ExecutorTestingKnobs{
					WaitForGossipUpdate:   true,
					CheckStmtStringChange: true,
				},
			},
		},
	}
	t.cluster = serverutils.StartTestCluster(t, spec.ClusterSize, args)
	t.clients = make([][]*gosql.DB, spec.ClusterSize)
	for i := range t.clients {
		t.clients[i] = append(t.clients[i], t.cluster.ServerConn(i))
	}
	r0 := sqlutils.MakeSQLRunner(t, t.clients[0][0])

	if spec.RangeSplitSize != 0 {
		if testing.Verbose() || log.V(1) {
			log.Infof(t.ctx, "Setting range split size: %d", spec.RangeSplitSize)
		}
		zoneCfg := config.DefaultZoneConfig()
		zoneCfg.RangeMaxBytes = int64(spec.RangeSplitSize)
		zoneCfg.RangeMinBytes = zoneCfg.RangeMaxBytes / 2
		buf, err := protoutil.Marshal(&zoneCfg)
		if err != nil {
			t.Fatal(err)
		}
		objID := keys.RootNamespaceID
		r0.Exec(`UPDATE system.zones SET config = $2 WHERE id = $1`, objID, buf)
	}

	if testing.Verbose() || log.V(1) {
		log.Infof(t.ctx, "Creating database")
	}

	r0.Exec("CREATE DATABASE test")

	for i := range t.clients {
		sqlutils.MakeSQLRunner(t, t.clients[i][0]).Exec("SET DATABASE = test")
	}

	if testing.Verbose() || log.V(1) {
		log.Infof(t.ctx, "Test setup done")
	}
}
Example 4: TestRangeSplitsWithWritePressure
// TestRangeSplitsWithWritePressure sets the zone config max bytes for
// a range to 256K and writes data until there are five ranges.
func TestRangeSplitsWithWritePressure(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Override default zone config.
	cfg := config.DefaultZoneConfig()
	cfg.RangeMaxBytes = 1 << 18
	defer config.TestingSetDefaultZoneConfig(cfg)()

	dbCtx := client.DefaultDBContext()
	dbCtx.TxnRetryOptions = retry.Options{
		InitialBackoff: 1 * time.Millisecond,
		MaxBackoff:     10 * time.Millisecond,
		Multiplier:     2,
	}
	s, _ := createTestDBWithContext(t, dbCtx)
	// This is purely to silence log spam.
	config.TestingSetupZoneConfigHook(s.Stopper)
	defer s.Stop()

	// Start a test writer writing about 32K per key, so that not too many
	// writes are needed before the range splits.
	done := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go startTestWriter(s.DB, int64(0), 1<<15, &wg, nil, nil, done, t)

	// Check that we split 5 times in allotted time.
	testutils.SucceedsSoon(t, func() error {
		// Scan the txn records.
		rows, err := s.DB.Scan(context.TODO(), keys.Meta2Prefix, keys.MetaMax, 0)
		if err != nil {
			return errors.Errorf("failed to scan meta2 keys: %s", err)
		}
		if lr := len(rows); lr < 5 {
			return errors.Errorf("expected >= 5 scans; got %d", lr)
		}
		return nil
	})
	close(done)
	wg.Wait()

	// This write pressure test often causes splits while resolve
	// intents are in flight, causing them to fail with range key
	// mismatch errors. However, LocalSender should retry in these
	// cases. Check here via MVCC scan that there are no dangling write
	// intents. We do this using a SucceedsSoon construct to account
	// for timing of finishing the test writer and a possibly-ongoing
	// asynchronous split.
	testutils.SucceedsSoon(t, func() error {
		if _, _, _, err := engine.MVCCScan(context.Background(), s.Eng, keys.LocalMax, roachpb.KeyMax, math.MaxInt64, hlc.MaxTimestamp, true, nil); err != nil {
			return errors.Errorf("failed to verify no dangling intents: %s", err)
		}
		return nil
	})
}
Example 5: createDefaultZoneConfig
// Create the key/value pairs for the default zone config entry.
func createDefaultZoneConfig() []roachpb.KeyValue {
	var ret []roachpb.KeyValue

	value := roachpb.Value{}
	desc := config.DefaultZoneConfig()
	if err := value.SetProto(&desc); err != nil {
		log.Fatalf(context.TODO(), "could not marshal %v", desc)
	}
	ret = append(ret, roachpb.KeyValue{
		Key:   MakeZoneKey(keys.RootNamespaceID),
		Value: value,
	})
	return ret
}
Example 6: TestSkipLargeReplicaSnapshot
func TestSkipLargeReplicaSnapshot(t *testing.T) {
	defer leaktest.AfterTest(t)()
	storeCfg := TestStoreConfig(nil)
	storeCfg.TestingKnobs.DisableSplitQueue = true

	const snapSize = 5 * (keySize + valSize)
	cfg := config.DefaultZoneConfig()
	cfg.RangeMaxBytes = snapSize
	defer config.TestingSetDefaultZoneConfig(cfg)()

	stopper := stop.NewStopper()
	defer stopper.Stop()
	store := createTestStoreWithConfig(t, stopper, &storeCfg)

	rep, err := store.GetReplica(rangeID)
	if err != nil {
		t.Fatal(err)
	}
	rep.SetMaxBytes(snapSize)

	if pErr := rep.redirectOnOrAcquireLease(context.Background()); pErr != nil {
		t.Fatal(pErr)
	}

	if err := fillTestRange(rep, snapSize); err != nil {
		t.Fatal(err)
	}

	if _, err := rep.GetSnapshot(context.Background(), "test"); err != nil {
		t.Fatal(err)
	}
	rep.CloseOutSnap()

	if err := fillTestRange(rep, snapSize*2); err != nil {
		t.Fatal(err)
	}

	rep.mu.Lock()
	_, err = rep.Snapshot()
	rep.mu.Unlock()
	if err != raft.ErrSnapshotTemporarilyUnavailable {
		rep.mu.Lock()
		after := rep.mu.state.Stats.Total()
		rep.mu.Unlock()
		t.Fatalf(
			"snapshot of a very large range (%d / %d, needsSplit: %v, exceeds snap limit: %v) should fail but got %v",
			after, rep.GetMaxBytes(),
			rep.needsSplitBySize(), rep.exceedsDoubleSplitSizeLocked(), err,
		)
	}
}
Example 7: Start
// Start starts the TestServer by bootstrapping an in-memory store
// (defaults to maximum of 100M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.ServingAddr() after Start() for client connections.
// Use TestServer.Stopper().Stop() to shutdown the server after the test
// completes.
func (ts *TestServer) Start(params base.TestServerArgs) error {
	if ts.Cfg == nil {
		panic("Cfg not set")
	}

	if params.Stopper == nil {
		params.Stopper = stop.NewStopper()
	}

	if !params.PartOfCluster {
		// Change the replication requirements so we don't get log spam about ranges
		// not being replicated enough.
		cfg := config.DefaultZoneConfig()
		cfg.NumReplicas = 1
		fn := config.TestingSetDefaultZoneConfig(cfg)
		params.Stopper.AddCloser(stop.CloserFn(fn))
	}

	// Needs to be called before NewServer to ensure resolvers are initialized.
	if err := ts.Cfg.InitNode(); err != nil {
		return err
	}

	var err error
	ts.Server, err = NewServer(*ts.Cfg, params.Stopper)
	if err != nil {
		return err
	}

	// Our context must be shared with our server.
	ts.Cfg = &ts.Server.cfg

	if err := ts.Server.Start(context.Background()); err != nil {
		return err
	}

	// If enabled, wait for initial splits to complete before returning control.
	// If initial splits do not complete, the server is stopped before
	// returning.
	if stk, ok := ts.cfg.TestingKnobs.Store.(*storage.StoreTestingKnobs); ok &&
		stk.DisableSplitQueue {
		return nil
	}
	if err := ts.WaitForInitialSplits(); err != nil {
		ts.Stop()
		return err
	}

	return nil
}
Example 8: UpdateZoneConfig
// UpdateZoneConfig updates the default zone config for the cluster.
func (c *Cluster) UpdateZoneConfig(rangeMinBytes, rangeMaxBytes int64) {
	zone := config.DefaultZoneConfig()
	zone.RangeMinBytes = rangeMinBytes
	zone.RangeMaxBytes = rangeMaxBytes

	buf, err := protoutil.Marshal(&zone)
	if err != nil {
		log.Fatal(context.Background(), err)
	}
	_, err = c.DB[0].Exec(`UPSERT INTO system.zones (id, config) VALUES (0, $1)`, buf)
	if err != nil {
		log.Fatal(context.Background(), err)
	}
}
Example 9: TestAdminAPIZoneDetails
// TestAdminAPIZoneDetails verifies the zone configuration information returned
// for both DatabaseDetailsResponse AND TableDetailsResponse.
func TestAdminAPIZoneDetails(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	ts := s.(*TestServer)

	// Create database and table.
	ac := log.AmbientContext{Tracer: tracing.NewTracer()}
	ctx, span := ac.AnnotateCtxWithSpan(context.Background(), "test")
	defer span.Finish()
	session := sql.NewSession(
		ctx, sql.SessionArgs{User: security.RootUser}, ts.sqlExecutor, nil, &sql.MemoryMetrics{})
	session.StartUnlimitedMonitor()
	setupQueries := []string{
		"CREATE DATABASE test",
		"CREATE TABLE test.tbl (val STRING)",
	}
	for _, q := range setupQueries {
		res := ts.sqlExecutor.ExecuteStatements(session, q, nil)
		defer res.Close()
		if res.ResultList[0].Err != nil {
			t.Fatalf("error executing '%s': %s", q, res.ResultList[0].Err)
		}
	}

	// Function to verify the zone for table "test.tbl" as returned by the Admin
	// API.
	verifyTblZone := func(
		expectedZone config.ZoneConfig, expectedLevel serverpb.ZoneConfigurationLevel,
	) {
		var resp serverpb.TableDetailsResponse
		if err := getAdminJSONProto(s, "databases/test/tables/tbl", &resp); err != nil {
			t.Fatal(err)
		}
		if a, e := &resp.ZoneConfig, &expectedZone; !proto.Equal(a, e) {
			t.Errorf("actual table zone config %v did not match expected value %v", a, e)
		}
		if a, e := resp.ZoneConfigLevel, expectedLevel; a != e {
			t.Errorf("actual table ZoneConfigurationLevel %s did not match expected value %s", a, e)
		}
		if t.Failed() {
			t.FailNow()
		}
	}

	// Function to verify the zone for database "test" as returned by the Admin
	// API.
	verifyDbZone := func(
		expectedZone config.ZoneConfig, expectedLevel serverpb.ZoneConfigurationLevel,
	) {
		var resp serverpb.DatabaseDetailsResponse
		if err := getAdminJSONProto(s, "databases/test", &resp); err != nil {
			t.Fatal(err)
		}
		if a, e := &resp.ZoneConfig, &expectedZone; !proto.Equal(a, e) {
			t.Errorf("actual db zone config %v did not match expected value %v", a, e)
		}
		if a, e := resp.ZoneConfigLevel, expectedLevel; a != e {
			t.Errorf("actual db ZoneConfigurationLevel %s did not match expected value %s", a, e)
		}
		if t.Failed() {
			t.FailNow()
		}
	}

	// Function to store a zone config for a given object ID.
	setZone := func(zoneCfg config.ZoneConfig, id sqlbase.ID) {
		zoneBytes, err := zoneCfg.Marshal()
		if err != nil {
			t.Fatal(err)
		}
		const query = `INSERT INTO system.zones VALUES($1, $2)`
		params := parser.NewPlaceholderInfo()
		params.SetValue(`1`, parser.NewDInt(parser.DInt(id)))
		params.SetValue(`2`, parser.NewDBytes(parser.DBytes(zoneBytes)))
		res := ts.sqlExecutor.ExecuteStatements(session, query, params)
		defer res.Close()
		if res.ResultList[0].Err != nil {
			t.Fatalf("error executing '%s': %s", query, res.ResultList[0].Err)
		}
	}

	// Verify zone matches cluster default.
	verifyDbZone(config.DefaultZoneConfig(), serverpb.ZoneConfigurationLevel_CLUSTER)
	verifyTblZone(config.DefaultZoneConfig(), serverpb.ZoneConfigurationLevel_CLUSTER)

	// Get ID path for table. This will be an array of three IDs, containing the ID of the root namespace,
	// the database, and the table (in that order).
	idPath, err := ts.admin.queryDescriptorIDPath(session, []string{"test", "tbl"})
	if err != nil {
		t.Fatal(err)
	}

	// Apply zone configuration to database and check again.
	dbZone := config.ZoneConfig{
		RangeMinBytes: 456,
	}
	setZone(dbZone, idPath[1])
	//......... the rest of this example is omitted .........
Example 10: TestDropDatabase
func TestDropDatabase(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := createTestServerParams()
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()
	ctx := context.TODO()

	// Fix the column families so the key counts below don't change if the
	// family heuristics are updated.
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.kv (k CHAR PRIMARY KEY, v CHAR, FAMILY (k), FAMILY (v));
INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd');
`); err != nil {
		t.Fatal(err)
	}

	dbNameKey := sqlbase.MakeNameMetadataKey(keys.RootNamespaceID, "t")
	r, err := kvDB.Get(ctx, dbNameKey)
	if err != nil {
		t.Fatal(err)
	}
	if !r.Exists() {
		t.Fatalf(`database "t" does not exist`)
	}
	dbDescKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(r.ValueInt()))
	desc := &sqlbase.Descriptor{}
	if err := kvDB.GetProto(ctx, dbDescKey, desc); err != nil {
		t.Fatal(err)
	}
	dbDesc := desc.GetDatabase()

	tbNameKey := sqlbase.MakeNameMetadataKey(dbDesc.ID, "kv")
	gr, err := kvDB.Get(ctx, tbNameKey)
	if err != nil {
		t.Fatal(err)
	}
	if !gr.Exists() {
		t.Fatalf(`table "kv" does not exist`)
	}
	tbDescKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(gr.ValueInt()))
	if err := kvDB.GetProto(ctx, tbDescKey, desc); err != nil {
		t.Fatal(err)
	}
	tbDesc := desc.GetTable()

	// Add a zone config for both the table and database.
	cfg := config.DefaultZoneConfig()
	buf, err := protoutil.Marshal(&cfg)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, tbDesc.ID, buf); err != nil {
		t.Fatal(err)
	}
	if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, dbDesc.ID, buf); err != nil {
		t.Fatal(err)
	}

	tbZoneKey := sqlbase.MakeZoneKey(tbDesc.ID)
	dbZoneKey := sqlbase.MakeZoneKey(dbDesc.ID)
	if gr, err := kvDB.Get(ctx, tbZoneKey); err != nil {
		t.Fatal(err)
	} else if !gr.Exists() {
		t.Fatalf("table zone config entry not found")
	}
	if gr, err := kvDB.Get(ctx, dbZoneKey); err != nil {
		t.Fatal(err)
	} else if !gr.Exists() {
		t.Fatalf("database zone config entry not found")
	}

	tablePrefix := keys.MakeTablePrefix(uint32(tbDesc.ID))
	tableStartKey := roachpb.Key(tablePrefix)
	tableEndKey := tableStartKey.PrefixEnd()
	if kvs, err := kvDB.Scan(ctx, tableStartKey, tableEndKey, 0); err != nil {
		t.Fatal(err)
	} else if l := 6; len(kvs) != l {
		t.Fatalf("expected %d key value pairs, but got %d", l, len(kvs))
	}

	if _, err := sqlDB.Exec(`DROP DATABASE t`); err != nil {
		t.Fatal(err)
	}

	if kvs, err := kvDB.Scan(ctx, tableStartKey, tableEndKey, 0); err != nil {
		t.Fatal(err)
	} else if l := 0; len(kvs) != l {
		t.Fatalf("expected %d key value pairs, but got %d", l, len(kvs))
	}
	if gr, err := kvDB.Get(ctx, tbDescKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("table descriptor still exists after database is dropped: %q", tbDescKey)
	}
	if gr, err := kvDB.Get(ctx, tbNameKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		//......... the rest of this example is omitted .........
Example 11: TestDropTable
func TestDropTable(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := createTestServerParams()
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()
	ctx := context.TODO()

	numRows := 2*sql.TableTruncateChunkSize + 1
	createKVTable(t, sqlDB, numRows)

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv")
	nameKey := sqlbase.MakeNameMetadataKey(keys.MaxReservedDescID+1, "kv")
	gr, err := kvDB.Get(ctx, nameKey)
	if err != nil {
		t.Fatal(err)
	}
	if !gr.Exists() {
		t.Fatalf("Name entry %q does not exist", nameKey)
	}
	descKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(gr.ValueInt()))

	// Add a zone config for the table.
	cfg := config.DefaultZoneConfig()
	buf, err := protoutil.Marshal(&cfg)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, tableDesc.ID, buf); err != nil {
		t.Fatal(err)
	}

	zoneKey := sqlbase.MakeZoneKey(tableDesc.ID)
	if gr, err := kvDB.Get(ctx, zoneKey); err != nil {
		t.Fatal(err)
	} else if !gr.Exists() {
		t.Fatalf("zone config entry not found")
	}

	tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))
	checkKeyCount(t, kvDB, tablePrefix, 3*numRows)
	if _, err := sqlDB.Exec(`DROP TABLE t.kv`); err != nil {
		t.Fatal(err)
	}
	checkKeyCount(t, kvDB, tablePrefix, 0)

	// Test that the deleted table cannot be used. This prevents regressions where
	// name -> descriptor ID caches might make this statement erroneously work.
	if _, err := sqlDB.Exec(`SELECT * FROM t.kv`); !testutils.IsError(err, `table "t.kv" does not exist`) {
		t.Fatalf("different error than expected: %v", err)
	}

	if gr, err := kvDB.Get(ctx, descKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("table descriptor still exists after the table is dropped")
	}

	if gr, err := kvDB.Get(ctx, nameKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("table namekey still exists after the table is dropped")
	}

	if gr, err := kvDB.Get(ctx, zoneKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("zone config entry still exists after the table is dropped")
	}
}
Example 12: TestGetZoneConfig
// TestGetZoneConfig exercises config.GetZoneConfig and the sql hook for it.
func TestGetZoneConfig(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := createTestServerParams()
	srv, sqlDB, _ := serverutils.StartServer(t, params)
	defer srv.Stopper().Stop()
	s := srv.(*server.TestServer)

	expectedCounter := uint32(keys.MaxReservedDescID)

	defaultZoneConfig := config.DefaultZoneConfig()
	defaultZoneConfig.RangeMinBytes = 1 << 20
	defaultZoneConfig.RangeMaxBytes = 1 << 20
	defaultZoneConfig.GC.TTLSeconds = 60

	{
		buf, err := protoutil.Marshal(&defaultZoneConfig)
		if err != nil {
			t.Fatal(err)
		}
		objID := keys.RootNamespaceID
		if _, err = sqlDB.Exec(`UPDATE system.zones SET config = $2 WHERE id = $1`, objID, buf); err != nil {
			t.Fatalf("problem writing zone %+v: %s", defaultZoneConfig, err)
		}
	}

	// Naming scheme for database and tables:
	// db1 has tables tb11 and tb12
	// db2 has tables tb21 and tb22

	expectedCounter++
	db1 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE DATABASE db1`); err != nil {
		t.Fatal(err)
	}

	expectedCounter++
	db2 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE DATABASE db2`); err != nil {
		t.Fatal(err)
	}

	expectedCounter++
	tb11 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db1.tb1 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}

	expectedCounter++
	tb12 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db1.tb2 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}

	expectedCounter++
	tb21 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db2.tb1 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}

	expectedCounter++
	tb22 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db2.tb2 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}

	{
		cfg := forceNewConfig(t, s)

		// We have no custom zone configs.
		testCases := []struct {
			key     roachpb.RKey
			zoneCfg config.ZoneConfig
		}{
			{roachpb.RKeyMin, defaultZoneConfig},
			{keys.MakeTablePrefix(0), defaultZoneConfig},
			{keys.MakeTablePrefix(1), defaultZoneConfig},
			{keys.MakeTablePrefix(keys.MaxReservedDescID), defaultZoneConfig},
			{keys.MakeTablePrefix(db1), defaultZoneConfig},
			{keys.MakeTablePrefix(db2), defaultZoneConfig},
			{keys.MakeTablePrefix(tb11), defaultZoneConfig},
			{keys.MakeTablePrefix(tb12), defaultZoneConfig},
			{keys.MakeTablePrefix(tb21), defaultZoneConfig},
			{keys.MakeTablePrefix(tb22), defaultZoneConfig},
		}

		for tcNum, tc := range testCases {
			zoneCfg, err := cfg.GetZoneConfigForKey(tc.key)
			if err != nil {
				t.Fatalf("#%d: err=%s", tcNum, err)
			}
			if !proto.Equal(&zoneCfg, &tc.zoneCfg) {
				t.Errorf("#%d: bad zone config.\nexpected: %+v\ngot: %+v", tcNum, tc.zoneCfg, zoneCfg)
			}
		}
	}

	// Now set some zone configs. We don't have a nice way of using table
	// names for this, so we do raw puts.
	//......... the rest of this example is omitted .........