This article collects typical usage examples of the Go function DefaultZoneConfig from github.com/cockroachdb/cockroach/config. If you are unsure what DefaultZoneConfig does, how to call it, or what it looks like in real code, the selected examples here should help.
Fifteen code examples of the DefaultZoneConfig function are shown below, sorted by popularity.
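Before the examples, here is a minimal sketch (not taken from the CockroachDB sources; the test name and the 1 MiB value are made up) of the pattern most of the tests below rely on: fetch the default zone config, tweak a field, and temporarily install it as the cluster-wide default with config.TestingSetDefaultZoneConfig, which returns a function that restores the previous default.

import (
    "testing"

    "github.com/cockroachdb/cockroach/config"
)

// TestWithSmallRanges is a hypothetical test illustrating the override pattern.
func TestWithSmallRanges(t *testing.T) {
    // Start from the cluster-wide default and adjust only what the test needs.
    cfg := config.DefaultZoneConfig()
    cfg.RangeMaxBytes = 1 << 20 // hypothetical 1 MiB cap so ranges split quickly
    // Install the modified config as the default; the returned closure restores
    // the original default when the test finishes.
    defer config.TestingSetDefaultZoneConfig(cfg)()
    // ... test body ...
}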
Example 1: TestZoneConfigValidate
func TestZoneConfigValidate(t *testing.T) {
defer leaktest.AfterTest(t)()
testCases := []struct {
cfg config.ZoneConfig
expected string
}{
{
config.ZoneConfig{},
"attributes for at least one replica must be specified in zone config",
},
{
config.ZoneConfig{
ReplicaAttrs: make([]roachpb.Attributes, 2),
},
"at least 3 replicas are required for multi-replica configurations",
},
{
config.ZoneConfig{
ReplicaAttrs: make([]roachpb.Attributes, 1),
},
"RangeMaxBytes 0 less than minimum allowed",
},
{
config.ZoneConfig{
ReplicaAttrs: make([]roachpb.Attributes, 1),
RangeMaxBytes: config.DefaultZoneConfig().RangeMaxBytes,
},
"",
},
{
config.ZoneConfig{
ReplicaAttrs: make([]roachpb.Attributes, 1),
RangeMinBytes: config.DefaultZoneConfig().RangeMaxBytes,
RangeMaxBytes: config.DefaultZoneConfig().RangeMaxBytes,
},
"is greater than or equal to RangeMaxBytes",
},
}
for i, c := range testCases {
err := c.cfg.Validate()
if c.expected == "" {
if err != nil {
t.Fatalf("%d: expected success, but got %v", i, err)
}
} else if !testutils.IsError(err, c.expected) {
t.Fatalf("%d: expected %s, but got %v", i, c.expected, err)
}
}
}
Example 2: Start
// Start starts the TestServer by bootstrapping an in-memory store
// (defaults to a maximum of 100M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.ServingAddr() after Start() for client connections.
// Use TestServer.Stopper().Stop() to shut down the server after the test
// completes.
func (ts *TestServer) Start(params base.TestServerArgs) error {
if ts.Ctx == nil {
panic("Ctx not set")
}
if params.Stopper == nil {
params.Stopper = stop.NewStopper()
}
if !params.PartOfCluster {
// Change the replication requirements so we don't get log spam about ranges
// not being replicated enough.
cfg := config.DefaultZoneConfig()
cfg.ReplicaAttrs = []roachpb.Attributes{{}}
fn := config.TestingSetDefaultZoneConfig(cfg)
params.Stopper.AddCloser(stop.CloserFn(fn))
}
// Needs to be called before NewServer to ensure resolvers are initialized.
if err := ts.Ctx.InitNode(); err != nil {
return err
}
// Ensure we have the correct number of engines. Add in-memory ones where
// needed. There must be at least one store/engine.
if params.StoresPerNode < 1 {
params.StoresPerNode = 1
}
for i := len(ts.Ctx.Engines); i < params.StoresPerNode; i++ {
ts.Ctx.Engines = append(ts.Ctx.Engines, engine.NewInMem(roachpb.Attributes{}, 100<<20, params.Stopper))
}
var err error
ts.Server, err = NewServer(*ts.Ctx, params.Stopper)
if err != nil {
return err
}
// Our context must be shared with our server.
ts.Ctx = &ts.Server.ctx
if err := ts.Server.Start(); err != nil {
return err
}
// If enabled, wait for initial splits to complete before returning control.
// If initial splits do not complete, the server is stopped before
// returning.
if stk, ok := ts.ctx.TestingKnobs.Store.(*storage.StoreTestingKnobs); ok &&
stk.DisableSplitQueue {
return nil
}
if err := ts.WaitForInitialSplits(); err != nil {
ts.Stop()
return err
}
return nil
}
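For orientation, here is a usage sketch (not from the repository; the test name is hypothetical) of how a test might drive TestServer as the comment above describes, assuming Ctx can be populated with NewTestContext() as in Example 6:

func TestServerUsage(t *testing.T) {
    ts := &TestServer{Ctx: NewTestContext()} // Ctx must be set before Start
    if err := ts.Start(base.TestServerArgs{}); err != nil {
        t.Fatal(err)
    }
    defer ts.Stopper().Stop() // shut down the server once the test completes
    // Clients should connect to the address reported by ServingAddr().
    addr := ts.ServingAddr()
    _ = addr
}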
Example 3: StartTestCluster
// StartTestCluster starts up a TestCluster made up of `nodes` in-memory testing
// servers.
// The cluster should be stopped using cluster.Stopper().Stop().
func StartTestCluster(t testing.TB, nodes int, args ClusterArgs) *TestCluster {
if nodes < 1 {
t.Fatal("invalid cluster size: ", nodes)
}
if args.ServerArgs.JoinAddr != "" {
t.Fatal("can't specify a join addr when starting a cluster")
}
if args.ServerArgs.Stopper != nil {
t.Fatal("can't set individual server stoppers when starting a cluster")
}
storeKnobs := args.ServerArgs.Knobs.Store
if storeKnobs != nil &&
(storeKnobs.(*storage.StoreTestingKnobs).DisableSplitQueue ||
storeKnobs.(*storage.StoreTestingKnobs).DisableReplicateQueue) {
t.Fatal("can't disable an individual server's queues when starting a cluster; " +
"the cluster controls replication")
}
if args.Stopper == nil {
args.Stopper = stop.NewStopper()
args.ServerArgs.Stopper = args.Stopper
}
switch args.ReplicationMode {
case ReplicationFull:
// Force all ranges to be replicated everywhere.
cfg := config.DefaultZoneConfig()
cfg.ReplicaAttrs = make([]roachpb.Attributes, nodes)
fn := config.TestingSetDefaultZoneConfig(cfg)
args.Stopper.AddCloser(stop.CloserFn(fn))
case ReplicationManual:
if args.ServerArgs.Knobs.Store == nil {
args.ServerArgs.Knobs.Store = &storage.StoreTestingKnobs{}
}
storeKnobs := args.ServerArgs.Knobs.Store.(*storage.StoreTestingKnobs)
storeKnobs.DisableSplitQueue = true
storeKnobs.DisableReplicateQueue = true
default:
t.Fatal("unexpected replication mode")
}
tc := &TestCluster{}
args.ServerArgs.PartOfCluster = true
first, conn, _ := serverutils.StartServer(t, args.ServerArgs)
tc.Servers = append(tc.Servers, first.(*server.TestServer))
tc.Conns = append(tc.Conns, conn)
args.ServerArgs.JoinAddr = first.ServingAddr()
for i := 1; i < nodes; i++ {
s, conn, _ := serverutils.StartServer(t, args.ServerArgs)
tc.Servers = append(tc.Servers, s.(*server.TestServer))
tc.Conns = append(tc.Conns, conn)
}
tc.waitForStores(t)
return tc
}
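A hypothetical caller (not part of the excerpt) might start a fully replicated three-node cluster like this, using the ClusterArgs and ReplicationFull values shown above and the Stopper() and Conns accessors mentioned in the code and doc comment:

func TestThreeNodeCluster(t *testing.T) {
    tc := StartTestCluster(t, 3, ClusterArgs{ReplicationMode: ReplicationFull})
    defer tc.Stopper().Stop()
    // Each server contributes a SQL connection in tc.Conns.
    if _, err := tc.Conns[0].Exec(`CREATE DATABASE test`); err != nil {
        t.Fatal(err)
    }
}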
Example 4: newRange
// newRange returns a new range with the given rangeID.
func newRange(rangeID roachpb.RangeID, allocator storage.Allocator) *Range {
return &Range{
desc: roachpb.RangeDescriptor{
RangeID: rangeID,
},
zone: config.DefaultZoneConfig(),
replicas: make(map[roachpb.StoreID]replica),
allocator: allocator,
}
}
Example 5: TestRangeSplitsWithWritePressure
// TestRangeSplitsWithWritePressure sets the zone config max bytes for
// a range to 256K and writes data until there are five ranges.
func TestRangeSplitsWithWritePressure(t *testing.T) {
defer leaktest.AfterTest(t)()
// Override default zone config.
cfg := config.DefaultZoneConfig()
cfg.RangeMaxBytes = 1 << 18
defer config.TestingSetDefaultZoneConfig(cfg)()
dbCtx := client.DefaultDBContext()
dbCtx.TxnRetryOptions = retry.Options{
InitialBackoff: 1 * time.Millisecond,
MaxBackoff: 10 * time.Millisecond,
Multiplier: 2,
}
s, _ := createTestDBWithContext(t, dbCtx)
// This is purely to silence log spam.
config.TestingSetupZoneConfigHook(s.Stopper)
defer s.Stop()
// Start a test writer that writes roughly 32K per key, so it doesn't take too many writes to split the 256K range.
done := make(chan struct{})
var wg sync.WaitGroup
wg.Add(1)
go startTestWriter(s.DB, int64(0), 1<<15, &wg, nil, nil, done, t)
// Check that we split five times within the allotted time.
util.SucceedsSoon(t, func() error {
// Scan the txn records.
rows, err := s.DB.Scan(keys.Meta2Prefix, keys.MetaMax, 0)
if err != nil {
return util.Errorf("failed to scan meta2 keys: %s", err)
}
if lr := len(rows); lr < 5 {
return util.Errorf("expected >= 5 scans; got %d", lr)
}
return nil
})
close(done)
wg.Wait()
// This write pressure test often causes splits while resolve
// intents are in flight, causing them to fail with range key
// mismatch errors. However, LocalSender should retry in these
// cases. Check here via MVCC scan that there are no dangling write
// intents. We do this using a SucceedsSoon construct to account
// for timing of finishing the test writer and a possibly-ongoing
// asynchronous split.
util.SucceedsSoon(t, func() error {
if _, _, err := engine.MVCCScan(context.Background(), s.Eng, keys.LocalMax, roachpb.KeyMax, 0, hlc.MaxTimestamp, true, nil); err != nil {
return util.Errorf("failed to verify no dangling intents: %s", err)
}
return nil
})
}
Example 6: StartWithStopper
// StartWithStopper is the same as Start, but allows passing a stopper
// explicitly.
func (ts *TestServer) StartWithStopper(stopper *stop.Stopper) error {
if ts.Ctx == nil {
ts.Ctx = NewTestContext()
}
if stopper == nil {
stopper = stop.NewStopper()
}
// Change the replication requirements so we don't get log spam about ranges
// not being replicated enough.
cfg := config.DefaultZoneConfig()
cfg.ReplicaAttrs = []roachpb.Attributes{{}}
fn := config.TestingSetDefaultZoneConfig(cfg)
stopper.AddCloser(stop.CloserFn(fn))
// Needs to be called before NewServer to ensure resolvers are initialized.
if err := ts.Ctx.InitNode(); err != nil {
return err
}
var err error
ts.Server, err = NewServer(ts.Ctx, stopper)
if err != nil {
return err
}
// Ensure we have the correct number of engines. Add in-memory ones where
// needed. There must be at least one store/engine.
if ts.StoresPerNode < 1 {
ts.StoresPerNode = 1
}
for i := len(ts.Ctx.Engines); i < ts.StoresPerNode; i++ {
ts.Ctx.Engines = append(ts.Ctx.Engines, engine.NewInMem(roachpb.Attributes{}, 100<<20, ts.Server.stopper))
}
if err := ts.Server.Start(); err != nil {
return err
}
// If enabled, wait for initial splits to complete before returning control.
// If initial splits do not complete, the server is stopped before
// returning.
if config.TestingTableSplitsDisabled() {
return nil
}
if err := ts.WaitForInitialSplits(); err != nil {
ts.Stop()
return err
}
return nil
}
Example 7: createDefaultZoneConfig
// Create the key/value pairs for the default zone config entry.
func createDefaultZoneConfig() []roachpb.KeyValue {
var ret []roachpb.KeyValue
value := roachpb.Value{}
desc := config.DefaultZoneConfig()
if err := value.SetProto(&desc); err != nil {
log.Fatalf("could not marshal %v", desc)
}
ret = append(ret, roachpb.KeyValue{
Key: MakeZoneKey(keys.RootNamespaceID),
Value: value,
})
return ret
}
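As a quick sanity check (not in the source), the marshaled entry can be decoded again, assuming roachpb.Value exposes GetProto as the counterpart of the SetProto call above:

kvs := createDefaultZoneConfig()
var decoded config.ZoneConfig
if err := kvs[0].Value.GetProto(&decoded); err != nil {
    log.Fatalf("could not unmarshal zone config: %v", err)
}
// decoded should now equal config.DefaultZoneConfig().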
Example 8: TestSkipLargeReplicaSnapshot
func TestSkipLargeReplicaSnapshot(t *testing.T) {
defer leaktest.AfterTest(t)()
sCtx := TestStoreContext()
sCtx.TestingKnobs.DisableSplitQueue = true
store, _, stopper := createTestStoreWithContext(t, &sCtx)
defer stopper.Stop()
const snapSize = 1 << 20 // 1 MiB
cfg := config.DefaultZoneConfig()
cfg.RangeMaxBytes = snapSize
defer config.TestingSetDefaultZoneConfig(cfg)()
rep, err := store.GetReplica(rangeID)
if err != nil {
t.Fatal(err)
}
rep.SetMaxBytes(snapSize)
if pErr := rep.redirectOnOrAcquireLease(context.Background()); pErr != nil {
t.Fatal(pErr)
}
fillTestRange(t, rep, snapSize)
if _, err := rep.GetSnapshot(context.Background()); err != nil {
t.Fatal(err)
}
fillTestRange(t, rep, snapSize*2)
if _, err := rep.Snapshot(); err != raft.ErrSnapshotTemporarilyUnavailable {
rep.mu.Lock()
after := rep.mu.state.Stats.Total()
rep.mu.Unlock()
t.Fatalf(
"snapshot of a very large range (%d / %d, needsSplit: %v, exceeds snap limit: %v) should fail but got %v",
after, rep.GetMaxBytes(),
rep.needsSplitBySize(), rep.exceedsDoubleSplitSizeLocked(), err,
)
}
}
Example 9: SetupMultinodeTestCluster
// Starts up a cluster made up of `nodes` in-memory testing servers,
// creates database `name` and returns open gosql.DB connections to each
// node (to the named db), as well as a cleanup func that stops and
// cleans up all nodes and connections.
func SetupMultinodeTestCluster(
t testing.TB, nodes int, name string,
) (MultinodeTestCluster, []*gosql.DB, *stop.Stopper) {
if nodes < 1 {
t.Fatal("invalid cluster size: ", nodes)
}
stopper := stop.NewStopper()
// Force all ranges to be replicated everywhere. This is needed until #7297 is
// fixed, otherwise starting a cluster takes forever.
cfg := config.DefaultZoneConfig()
cfg.ReplicaAttrs = make([]roachpb.Attributes, nodes)
fn := config.TestingSetDefaultZoneConfig(cfg)
stopper.AddCloser(stop.CloserFn(fn))
var servers []serverutils.TestServerInterface
var conns []*gosql.DB
args := base.TestServerArgs{
Stopper: stopper,
PartOfCluster: true,
UseDatabase: name,
}
first, conn, _ := serverutils.StartServer(t, args)
servers = append(servers, first)
conns = append(conns, conn)
args.JoinAddr = first.ServingAddr()
for i := 1; i < nodes; i++ {
s, conn, _ := serverutils.StartServer(t, args)
servers = append(servers, s)
conns = append(conns, conn)
}
if _, err := conns[0].Exec(fmt.Sprintf(`CREATE DATABASE %s`, name)); err != nil {
t.Fatal(err)
}
testCluster := MultinodeTestCluster{Servers: servers}
return testCluster, conns, first.Stopper()
}
Example 10: TestDropTable
func TestDropTable(t *testing.T) {
defer leaktest.AfterTest(t)()
s, sqlDB, kvDB := setup(t)
defer cleanup(s, sqlDB)
if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.kv (k CHAR PRIMARY KEY, v CHAR);
INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd');
`); err != nil {
t.Fatal(err)
}
nameKey := sqlbase.MakeNameMetadataKey(keys.MaxReservedDescID+1, "kv")
gr, pErr := kvDB.Get(nameKey)
if pErr != nil {
t.Fatal(pErr)
}
if !gr.Exists() {
t.Fatalf("Name entry %q does not exist", nameKey)
}
descKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(gr.ValueInt()))
desc := &sqlbase.Descriptor{}
if pErr := kvDB.GetProto(descKey, desc); pErr != nil {
t.Fatal(pErr)
}
tableDesc := desc.GetTable()
// Add a zone config for the table.
cfg := config.DefaultZoneConfig()
buf, err := protoutil.Marshal(&cfg)
if err != nil {
t.Fatal(err)
}
if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, tableDesc.ID, buf); err != nil {
t.Fatal(err)
}
zoneKey := sqlbase.MakeZoneKey(tableDesc.ID)
if gr, err := kvDB.Get(zoneKey); err != nil {
t.Fatal(err)
} else if !gr.Exists() {
t.Fatalf("zone config entry not found")
}
tablePrefix := keys.MakeTablePrefix(uint32(tableDesc.ID))
tableStartKey := roachpb.Key(tablePrefix)
tableEndKey := tableStartKey.PrefixEnd()
if kvs, err := kvDB.Scan(tableStartKey, tableEndKey, 0); err != nil {
t.Fatal(err)
} else if l := 6; len(kvs) != l {
t.Fatalf("expected %d key value pairs, but got %d", l, len(kvs))
}
if _, err := sqlDB.Exec(`DROP TABLE t.kv`); err != nil {
t.Fatal(err)
}
// Test that the deleted table cannot be used. This prevents regressions where
// name -> descriptor ID caches might make this statement erroneously work.
if _, err := sqlDB.Exec(`SELECT * FROM t.kv`); !testutils.IsError(err, `table "t.kv" does not exist`) {
t.Fatalf("different error than expected: %s", err)
}
if kvs, err := kvDB.Scan(tableStartKey, tableEndKey, 0); err != nil {
t.Fatal(err)
} else if l := 0; len(kvs) != l {
t.Fatalf("expected %d key value pairs, but got %d", l, len(kvs))
}
if gr, err := kvDB.Get(descKey); err != nil {
t.Fatal(err)
} else if gr.Exists() {
t.Fatalf("table descriptor still exists after the table is dropped")
}
if gr, err := kvDB.Get(nameKey); err != nil {
t.Fatal(err)
} else if gr.Exists() {
t.Fatalf("table namekey still exists after the table is dropped")
}
if gr, err := kvDB.Get(zoneKey); err != nil {
t.Fatal(err)
} else if gr.Exists() {
t.Fatalf("zone config entry still exists after the table is dropped")
}
}
Example 11: TestGetZoneConfig
// TestGetZoneConfig exercises config.GetZoneConfig and the sql hook for it.
func TestGetZoneConfig(t *testing.T) {
defer leaktest.AfterTest(t)()
// Disable splitting. We're using bad attributes in zone configs
// to be able to match.
defer config.TestingDisableTableSplits()()
s, sqlDB, _ := setup(t)
defer cleanup(s, sqlDB)
expectedCounter := uint32(keys.MaxReservedDescID + 1)
defaultZoneConfig := config.DefaultZoneConfig()
defaultZoneConfig.RangeMinBytes = 1 << 20
defaultZoneConfig.RangeMaxBytes = 1 << 20
defaultZoneConfig.GC.TTLSeconds = 60
{
buf, err := proto.Marshal(&defaultZoneConfig)
if err != nil {
t.Fatal(err)
}
objID := keys.RootNamespaceID
if _, err = sqlDB.Exec(`UPDATE system.zones SET config = $2 WHERE id = $1`, objID, buf); err != nil {
t.Fatalf("problem writing zone %+v: %s", defaultZoneConfig, err)
}
}
// Naming scheme for database and tables:
// db1 has tables tb11 and tb12
// db2 has tables tb21 and tb22
db1 := expectedCounter
if _, err := sqlDB.Exec(`CREATE DATABASE db1`); err != nil {
t.Fatal(err)
}
expectedCounter++
db2 := expectedCounter
if _, err := sqlDB.Exec(`CREATE DATABASE db2`); err != nil {
t.Fatal(err)
}
expectedCounter++
tb11 := expectedCounter
if _, err := sqlDB.Exec(`CREATE TABLE db1.tb1 (k INT PRIMARY KEY, v INT)`); err != nil {
t.Fatal(err)
}
expectedCounter++
tb12 := expectedCounter
if _, err := sqlDB.Exec(`CREATE TABLE db1.tb2 (k INT PRIMARY KEY, v INT)`); err != nil {
t.Fatal(err)
}
expectedCounter++
tb21 := expectedCounter
if _, err := sqlDB.Exec(`CREATE TABLE db2.tb1 (k INT PRIMARY KEY, v INT)`); err != nil {
t.Fatal(err)
}
expectedCounter++
tb22 := expectedCounter
if _, err := sqlDB.Exec(`CREATE TABLE db2.tb2 (k INT PRIMARY KEY, v INT)`); err != nil {
t.Fatal(err)
}
expectedCounter++
cfg := forceNewConfig(t, s)
// We have no custom zone configs.
testCases := []struct {
key roachpb.RKey
zoneCfg *config.ZoneConfig
}{
{roachpb.RKeyMin, &defaultZoneConfig},
{keys.MakeTablePrefix(0), &defaultZoneConfig},
{keys.MakeTablePrefix(1), &defaultZoneConfig},
{keys.MakeTablePrefix(keys.MaxReservedDescID), &defaultZoneConfig},
{keys.MakeTablePrefix(db1), &defaultZoneConfig},
{keys.MakeTablePrefix(db2), &defaultZoneConfig},
{keys.MakeTablePrefix(tb11), &defaultZoneConfig},
{keys.MakeTablePrefix(tb12), &defaultZoneConfig},
{keys.MakeTablePrefix(tb21), &defaultZoneConfig},
{keys.MakeTablePrefix(tb22), &defaultZoneConfig},
}
for tcNum, tc := range testCases {
zoneCfg, err := cfg.GetZoneConfigForKey(tc.key)
if err != nil {
t.Fatalf("#%d: err=%s", tcNum, err)
}
if !reflect.DeepEqual(zoneCfg, tc.zoneCfg) {
t.Errorf("#%d: bad zone config.\nexpected: %+v\ngot: %+v", tcNum, tc.zoneCfg, zoneCfg)
}
}
// Now set some zone configs. We don't have a nice way of using table
// names for this, so we do raw puts.
// Here is the list of dbs/tables and whether they have a custom zone config:
//......... (the rest of the code is omitted here) .........
Example 12: TableDetails
//......... (part of the code is omitted here) .........
}
}
// Marshal SHOW GRANTS result.
{
const (
userCol = "User"
privilegesCol = "Privileges"
)
scanner := makeResultScanner(r.ResultList[2].Columns)
for _, row := range r.ResultList[2].Rows {
// Marshal grant, splitting comma-separated privileges into a proper slice.
var grant serverpb.TableDetailsResponse_Grant
var privileges string
if err := scanner.Scan(row, userCol, &grant.User); err != nil {
return nil, err
}
if err := scanner.Scan(row, privilegesCol, &privileges); err != nil {
return nil, err
}
grant.Privileges = strings.Split(privileges, ",")
resp.Grants = append(resp.Grants, grant)
}
}
// Marshal SHOW CREATE TABLE result.
{
const createTableCol = "CreateTable"
showResult := r.ResultList[3]
if len(showResult.Rows) != 1 {
return nil, s.serverErrorf("CreateTable response not available.")
}
scanner := makeResultScanner(showResult.Columns)
var createStmt string
if err := scanner.Scan(showResult.Rows[0], createTableCol, &createStmt); err != nil {
return nil, err
}
resp.CreateTableStatement = createStmt
}
// Get the number of ranges in the table. We get the key span for the table
// data. Then, we count the number of ranges that make up that key span.
{
var iexecutor sql.InternalExecutor
var tableSpan roachpb.Span
if err := s.server.db.Txn(func(txn *client.Txn) error {
var err error
tableSpan, err = iexecutor.GetTableSpan(s.getUser(req), txn, escDBName, escTableName)
return err
}); err != nil {
return nil, s.serverError(err)
}
tableRSpan := roachpb.RSpan{}
var err error
tableRSpan.Key, err = keys.Addr(tableSpan.Key)
if err != nil {
return nil, s.serverError(err)
}
tableRSpan.EndKey, err = keys.Addr(tableSpan.EndKey)
if err != nil {
return nil, s.serverError(err)
}
rangeCount, err := s.server.distSender.CountRanges(tableRSpan)
if err != nil {
return nil, s.serverError(err)
}
resp.RangeCount = rangeCount
}
// Query the zone configuration for this table.
{
path, err := s.queryDescriptorIDPath(session, []string{escDBName, escTableName})
if err != nil {
return nil, s.serverError(err)
}
id, zone, zoneExists, err := s.queryZonePath(session, path)
if err != nil {
return nil, s.serverError(err)
}
if !zoneExists {
zone = config.DefaultZoneConfig()
}
resp.ZoneConfig = zone
switch id {
case path[1]:
resp.ZoneConfigLevel = serverpb.ZoneConfigurationLevel_DATABASE
case path[2]:
resp.ZoneConfigLevel = serverpb.ZoneConfigurationLevel_TABLE
default:
resp.ZoneConfigLevel = serverpb.ZoneConfigurationLevel_CLUSTER
}
}
return &resp, nil
}
Example 13: defaultBinary
builderImage = "cockroachdb/builder"
builderTag = "20160611-170214"
builderImageFull = builderImage + ":" + builderTag
networkName = "cockroachdb_acceptance"
)
// DefaultTCP is the default SQL/RPC port specification.
const DefaultTCP nat.Port = base.DefaultPort + "/tcp"
const defaultHTTP nat.Port = base.DefaultHTTPPort + "/tcp"
var cockroachImage = flag.String("i", builderImageFull, "the docker image to run")
var cockroachBinary = flag.String("b", defaultBinary(), "the binary to run (if image == "+builderImage+")")
var cockroachEntry = flag.String("e", "", "the entry point for the image")
var waitOnStop = flag.Bool("w", false, "wait for the user to interrupt before tearing down the cluster")
var pwd = filepath.Clean(os.ExpandEnv("${PWD}"))
var maxRangeBytes = int64(config.DefaultZoneConfig().RangeMaxBytes)
// keyLen is the length (in bits) of the generated CA and node certs.
const keyLen = 1024
func defaultBinary() string {
gopath := filepath.SplitList(os.Getenv("GOPATH"))
if len(gopath) == 0 {
return ""
}
return gopath[0] + "/bin/linux_amd64/cockroach"
}
func exists(path string) bool {
if _, err := os.Stat(path); os.IsNotExist(err) {
return false
Example 14: String
"sort"
"strconv"
"strings"
"unicode"
"github.com/dustin/go-humanize"
"github.com/cockroachdb/cockroach/config"
"github.com/cockroachdb/cockroach/roachpb"
"github.com/cockroachdb/cockroach/util/humanizeutil"
)
// This file implements method receivers for members of the server.Context struct
// -- 'Stores' and 'JoinList' -- which satisfy pflag's Value interface.
var minimumStoreSize = 10 * config.DefaultZoneConfig().RangeMaxBytes
// StoreSpec contains the details that can be specified in the cli pertaining
// to the --store flag.
type StoreSpec struct {
Path string
SizeInBytes int64
SizePercent float64
InMemory bool
Attributes roachpb.Attributes
}
// String returns a fully parsable version of the store spec.
func (ss StoreSpec) String() string {
var buffer bytes.Buffer
if len(ss.Path) != 0 {
Example 15: TestAdminAPITableDetailsZone
func TestAdminAPITableDetailsZone(t *testing.T) {
defer leaktest.AfterTest(t)()
s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
defer s.Stopper().Stop()
ts := s.(*TestServer)
// Create database and table.
session := sql.NewSession(sql.SessionArgs{User: security.RootUser}, ts.sqlExecutor, nil)
setupQueries := []string{
"CREATE DATABASE test",
"CREATE TABLE test.tbl (val STRING)",
}
for _, q := range setupQueries {
res := ts.sqlExecutor.ExecuteStatements(context.Background(), session, q, nil)
if res.ResultList[0].Err != nil {
t.Fatalf("error executing '%s': %s", q, res.ResultList[0].Err)
}
}
// Function to verify the zone for test.tbl as returned by the Admin API.
verifyZone := func(expectedZone config.ZoneConfig, expectedLevel serverpb.ZoneConfigurationLevel) {
var resp serverpb.TableDetailsResponse
if err := apiGet(s, "databases/test/tables/tbl", &resp); err != nil {
t.Fatal(err)
}
if a, e := &resp.ZoneConfig, &expectedZone; !proto.Equal(a, e) {
t.Errorf("actual zone config %v did not match expected value %v", a, e)
}
if a, e := resp.ZoneConfigLevel, expectedLevel; a != e {
t.Errorf("actual ZoneConfigurationLevel %s did not match expected value %s", a, e)
}
if t.Failed() {
t.FailNow()
}
}
// Function to store a zone config for a given object ID.
setZone := func(zoneCfg config.ZoneConfig, id sqlbase.ID) {
zoneBytes, err := zoneCfg.Marshal()
if err != nil {
t.Fatal(err)
}
const query = `INSERT INTO system.zones VALUES($1, $2)`
params := parser.NewPlaceholderInfo()
params.SetValue(`1`, parser.NewDInt(parser.DInt(id)))
params.SetValue(`2`, parser.NewDBytes(parser.DBytes(zoneBytes)))
res := ts.sqlExecutor.ExecuteStatements(context.Background(), session, query, params)
if res.ResultList[0].Err != nil {
t.Fatalf("error executing '%s': %s", query, res.ResultList[0].Err)
}
}
// Verify zone matches cluster default.
verifyZone(config.DefaultZoneConfig(), serverpb.ZoneConfigurationLevel_CLUSTER)
// Get ID path for table. This will be an array of three IDs, containing the ID of the root namespace,
// the database, and the table (in that order).
idPath, err := ts.admin.queryDescriptorIDPath(context.Background(), session, []string{"test", "tbl"})
if err != nil {
t.Fatal(err)
}
// Apply zone configuration to database and check again.
dbZone := config.ZoneConfig{
RangeMinBytes: 456,
}
setZone(dbZone, idPath[1])
verifyZone(dbZone, serverpb.ZoneConfigurationLevel_DATABASE)
// Apply zone configuration to table and check again.
tblZone := config.ZoneConfig{
RangeMinBytes: 789,
}
setZone(tblZone, idPath[2])
verifyZone(tblZone, serverpb.ZoneConfigurationLevel_TABLE)
}