This article collects typical usage examples of the TestServerArgs type from the Go package github.com/cockroachdb/cockroach/pkg/base. If you are wondering what base.TestServerArgs is for and how to use it, the examples selected here should help.
Four code examples of the TestServerArgs type are shown below, ordered by popularity.
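Before the examples, here is a minimal sketch (not taken from the repository) of how a test typically fills in a base.TestServerArgs and hands it to serverutils.StartServer. The package name, test name, Insecure setting, and trivial query are illustrative assumptions, and like the repository's own test packages it presumes a TestMain has registered the test-server factory.

package example_test // illustrative package name

import (
	"testing"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
)

func TestServerArgsUsage(t *testing.T) {
	// Fields left at their zero value fall back to the defaults applied by
	// makeTestConfigFromParams (Example 3 below).
	args := base.TestServerArgs{
		Insecure: true, // illustrative: run without TLS certificates
	}
	s, sqlDB, _ := serverutils.StartServer(t, args)
	// Per the Start documentation (Example 2), the stopper shuts the server
	// down once the test is done.
	defer s.Stopper().Stop()

	// sqlDB is a *gosql.DB connected to the freshly started server.
	if _, err := sqlDB.Exec(`SELECT 1`); err != nil {
		t.Fatal(err)
	}
}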
Example 1: AddServer
// AddServer creates a server with the specified arguments and appends it to
// the TestCluster.
func (tc *TestCluster) AddServer(t testing.TB, serverArgs base.TestServerArgs) {
	serverArgs.Stopper = stop.NewStopper()
	s, conn, _ := serverutils.StartServer(t, serverArgs)
	tc.Servers = append(tc.Servers, s.(*server.TestServer))
	tc.Conns = append(tc.Conns, conn)
	tc.mu.Lock()
	tc.mu.serverStoppers = append(tc.mu.serverStoppers, serverArgs.Stopper)
	tc.mu.Unlock()
}
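As a hedged follow-up sketch (reusing the imports from the sketch above plus the testutils/testcluster package, with an illustrative test name), a test could grow an already running cluster by one node; the PartOfCluster and JoinAddr wiring mirrors the loop in StartTestCluster, shown in Example 4 below.

func TestAddServerToRunningCluster(t *testing.T) {
	// Start a one-node cluster, then add a second server that joins it.
	tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{})
	defer tc.Stopper().Stop()

	extraArgs := base.TestServerArgs{
		PartOfCluster: true,
		// Join the gossip network via the first server's address, exactly as
		// StartTestCluster does for every node after the first.
		JoinAddr: tc.Servers[0].ServingAddr(),
	}
	tc.AddServer(t, extraArgs)
}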
Example 2: Start
// Start starts the TestServer by bootstrapping an in-memory store
// (defaults to maximum of 100M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.ServingAddr() after Start() for client connections.
// Use TestServer.Stopper().Stop() to shutdown the server after the test
// completes.
func (ts *TestServer) Start(params base.TestServerArgs) error {
	if ts.Cfg == nil {
		panic("Cfg not set")
	}
	if params.Stopper == nil {
		params.Stopper = stop.NewStopper()
	}
	if !params.PartOfCluster {
		// Change the replication requirements so we don't get log spam about ranges
		// not being replicated enough.
		cfg := config.DefaultZoneConfig()
		cfg.NumReplicas = 1
		fn := config.TestingSetDefaultZoneConfig(cfg)
		params.Stopper.AddCloser(stop.CloserFn(fn))
	}
	// Needs to be called before NewServer to ensure resolvers are initialized.
	if err := ts.Cfg.InitNode(); err != nil {
		return err
	}
	var err error
	ts.Server, err = NewServer(*ts.Cfg, params.Stopper)
	if err != nil {
		return err
	}
	// Our context must be shared with our server.
	ts.Cfg = &ts.Server.cfg
	if err := ts.Server.Start(context.Background()); err != nil {
		return err
	}
	// If enabled, wait for initial splits to complete before returning control.
	// If initial splits do not complete, the server is stopped before
	// returning.
	if stk, ok := ts.cfg.TestingKnobs.Store.(*storage.StoreTestingKnobs); ok &&
		stk.DisableSplitQueue {
		return nil
	}
	if err := ts.WaitForInitialSplits(); err != nil {
		ts.Stop()
		return err
	}
	return nil
}
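A hedged sketch of the lifecycle described in the comment above, driven through serverutils so that Start receives a fully populated Config; the key/value round trip and the function name are illustrative, and it assumes the imports from the first sketch plus context.

func TestServerLifecycle(t *testing.T) {
	s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	// Stopping the stopper tears down everything Start set up.
	defer s.Stopper().Stop()

	// ServingAddr is the address clients should dial once Start has returned.
	t.Logf("test server listening at %s", s.ServingAddr())

	// kvDB is the third return value of StartServer, a KV client bound to
	// the running node.
	if err := kvDB.Put(context.Background(), "illustrative-key", "value"); err != nil {
		t.Fatal(err)
	}
}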
Example 3: makeTestConfigFromParams
// makeTestConfigFromParams creates a Config from a base.TestServerArgs.
func makeTestConfigFromParams(params base.TestServerArgs) Config {
	cfg := makeTestConfig()
	cfg.TestingKnobs = params.Knobs
	if params.JoinAddr != "" {
		cfg.JoinList = []string{params.JoinAddr}
	}
	cfg.Insecure = params.Insecure
	cfg.SocketFile = params.SocketFile
	cfg.RetryOptions = params.RetryOptions
	if params.MetricsSampleInterval != 0 {
		cfg.MetricsSampleInterval = params.MetricsSampleInterval
	}
	if params.RaftTickInterval != 0 {
		cfg.RaftTickInterval = params.RaftTickInterval
	}
	if params.RaftElectionTimeoutTicks != 0 {
		cfg.RaftElectionTimeoutTicks = params.RaftElectionTimeoutTicks
	}
	if knobs := params.Knobs.Store; knobs != nil {
		if mo := knobs.(*storage.StoreTestingKnobs).MaxOffset; mo != 0 {
			cfg.MaxOffset = mo
		}
	}
	if params.ScanInterval != 0 {
		cfg.ScanInterval = params.ScanInterval
	}
	if params.ScanMaxIdleTime != 0 {
		cfg.ScanMaxIdleTime = params.ScanMaxIdleTime
	}
	if params.SSLCA != "" {
		cfg.SSLCA = params.SSLCA
	}
	if params.SSLCert != "" {
		cfg.SSLCert = params.SSLCert
	}
	if params.SSLCertKey != "" {
		cfg.SSLCertKey = params.SSLCertKey
	}
	if params.TimeSeriesQueryWorkerMax != 0 {
		cfg.TimeSeriesServerConfig.QueryWorkerMax = params.TimeSeriesQueryWorkerMax
	}
	if params.DisableEventLog {
		cfg.EventLogEnabled = false
	}
	if cfg.Insecure {
		// Whenever we can (i.e. in insecure mode), use IsolatedTestAddr
		// to prevent issues that can occur when running a test under
		// stress.
		cfg.Addr = util.IsolatedTestAddr.String()
		cfg.AdvertiseAddr = util.IsolatedTestAddr.String()
		cfg.HTTPAddr = util.IsolatedTestAddr.String()
	} else {
		cfg.Addr = util.TestAddr.String()
		cfg.AdvertiseAddr = util.TestAddr.String()
		cfg.HTTPAddr = util.TestAddr.String()
	}
	if params.Addr != "" {
		cfg.Addr = params.Addr
		cfg.AdvertiseAddr = params.Addr
	}
	// Ensure we have the correct number of engines. Add in-memory ones where
	// needed. There must be at least one store/engine.
	if len(params.StoreSpecs) == 0 {
		params.StoreSpecs = []base.StoreSpec{base.DefaultTestStoreSpec}
	}
	// Validate the store specs.
	for _, storeSpec := range params.StoreSpecs {
		if storeSpec.InMemory {
			if storeSpec.SizePercent > 0 {
				panic(fmt.Sprintf("test server does not yet support in memory stores based on percentage of total memory: %s", storeSpec))
			}
		} else {
			// TODO(bram): This will require some cleanup of on disk files.
			panic(fmt.Sprintf("test server does not yet support on disk stores: %s", storeSpec))
		}
	}
	// Copy over the store specs.
	cfg.Stores = base.StoreSpecList{Specs: params.StoreSpecs}
	if cfg.TestingKnobs.Store == nil {
		cfg.TestingKnobs.Store = &storage.StoreTestingKnobs{}
	}
	cfg.TestingKnobs.Store.(*storage.StoreTestingKnobs).SkipMinSizeCheck = true
	return cfg
}
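A hedged sketch of a TestServerArgs value that exercises several of the fields consumed above; the concrete values and test name are illustrative, and it assumes the imports from the first sketch plus time and the storage package for StoreTestingKnobs.

func TestServerArgsKnobs(t *testing.T) {
	args := base.TestServerArgs{
		// Knobs.Store must be a *storage.StoreTestingKnobs, matching the
		// type assertions above. With DisableSplitQueue set, Start
		// (Example 2) returns without waiting for the initial splits.
		Knobs: base.TestingKnobs{
			Store: &storage.StoreTestingKnobs{
				DisableSplitQueue: true,
			},
		},
		// Non-zero durations override the defaults from makeTestConfig.
		ScanInterval:    time.Second,
		ScanMaxIdleTime: 200 * time.Millisecond,
		// Only in-memory stores pass the validation loop above.
		StoreSpecs: []base.StoreSpec{base.DefaultTestStoreSpec},
	}
	s, _, _ := serverutils.StartServer(t, args)
	defer s.Stopper().Stop()
}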
Example 4: StartTestCluster
// StartTestCluster starts up a TestCluster made up of `nodes` in-memory testing
// servers.
// The cluster should be stopped using cluster.Stop().
func StartTestCluster(t testing.TB, nodes int, args base.TestClusterArgs) *TestCluster {
	if nodes < 1 {
		t.Fatal("invalid cluster size: ", nodes)
	}
	if args.ServerArgs.JoinAddr != "" {
		t.Fatal("can't specify a join addr when starting a cluster")
	}
	if args.ServerArgs.Stopper != nil {
		t.Fatal("can't set individual server stoppers when starting a cluster")
	}
	storeKnobs := args.ServerArgs.Knobs.Store
	if storeKnobs != nil &&
		(storeKnobs.(*storage.StoreTestingKnobs).DisableSplitQueue ||
			storeKnobs.(*storage.StoreTestingKnobs).DisableReplicateQueue) {
		t.Fatal("can't disable an individual server's queues when starting a cluster; " +
			"the cluster controls replication")
	}
	switch args.ReplicationMode {
	case base.ReplicationAuto:
	case base.ReplicationManual:
		if args.ServerArgs.Knobs.Store == nil {
			args.ServerArgs.Knobs.Store = &storage.StoreTestingKnobs{}
		}
		storeKnobs := args.ServerArgs.Knobs.Store.(*storage.StoreTestingKnobs)
		storeKnobs.DisableSplitQueue = true
		storeKnobs.DisableReplicateQueue = true
	default:
		t.Fatal("unexpected replication mode")
	}
	tc := &TestCluster{}
	tc.stopper = stop.NewStopper()
	for i := 0; i < nodes; i++ {
		var serverArgs base.TestServerArgs
		if perNodeServerArgs, ok := args.ServerArgsPerNode[i]; ok {
			serverArgs = perNodeServerArgs
		} else {
			serverArgs = args.ServerArgs
		}
		serverArgs.PartOfCluster = true
		if i > 0 {
			serverArgs.JoinAddr = tc.Servers[0].ServingAddr()
		}
		tc.AddServer(t, serverArgs)
	}
	// Create a closer that will stop the individual server stoppers when the
	// cluster stopper is stopped.
	tc.stopper.AddCloser(stop.CloserFn(tc.stopServers))
	tc.WaitForStores(t, tc.Servers[0].Gossip())
	// TODO(peter): We should replace the hardcoded 3 with the default ZoneConfig
	// replication factor.
	if args.ReplicationMode == base.ReplicationAuto && nodes >= 3 {
		if err := tc.waitForFullReplication(); err != nil {
			t.Fatal(err)
		}
	}
	return tc
}
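Finally, a hedged sketch of calling StartTestCluster with the manual replication mode handled in the switch above; the test name and the Insecure setting are illustrative, the testcluster import path is assumed, and stopping via tc.Stopper().Stop() follows the usual pattern in the repository's tests.

func TestThreeNodeManualCluster(t *testing.T) {
	tc := testcluster.StartTestCluster(t, 3, base.TestClusterArgs{
		ReplicationMode: base.ReplicationManual,
		// ServerArgs applies to every node unless overridden for a specific
		// node index via ServerArgsPerNode.
		ServerArgs: base.TestServerArgs{
			Insecure: true, // illustrative
		},
	})
	defer tc.Stopper().Stop()

	// With ReplicationManual the split and replicate queues are disabled on
	// every node, so the test itself decides when and where ranges move.
	t.Logf("first node serving at %s", tc.Servers[0].ServingAddr())
}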