This article collects typical usage examples of the Golang NewHealthCheck function from github.com/youtube/vitess/go/vt/discovery. If you are wondering what NewHealthCheck does, how to call it, or want to see it used in real code, the curated examples below should help.
The following shows 15 code examples of NewHealthCheck, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Golang code samples.
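Most of the examples share one lifecycle: create a HealthCheck, attach a listener, feed it tablets through a topology watcher, and tear both down when done. As a quick orientation, here is a minimal sketch of that pattern. It assumes the three-argument NewHealthCheck signature seen in most examples below (the parameter list varies across Vitess versions), the flag names from Example 3, and the two-argument SetListener from Examples 2 and 9; watchShard and myListener are placeholder names, not part of the Vitess API.

func watchShard(ts topo.Server, cell, keyspace, shard string) (discovery.HealthCheck, *discovery.TopologyWatcher, error) {
    // Three-argument form, as in Examples 3, 4 and 9.
    hc := discovery.NewHealthCheck(*healthCheckConnTimeout, *healthCheckRetryDelay, *healthCheckTimeout)
    // myListener is a placeholder; it must implement the discovery
    // listener interface so it receives tablet stats updates.
    hc.SetListener(myListener, true /* sendDownEvents */)

    // The watcher feeds tablets for (cell, keyspace, shard) from the
    // topology into the healthcheck.
    watcher := discovery.NewShardReplicationWatcher(ts, hc, cell, keyspace, shard,
        *healthCheckTopologyRefresh, discovery.DefaultTopoReadConcurrency)

    // Block until the initial topology has been seen at least once.
    if err := watcher.WaitForInitialTopology(); err != nil {
        watcher.Stop()
        hc.Close()
        return nil, nil, err
    }
    // Callers should eventually run watcher.Stop() and hc.Close(), as in Example 5.
    return hc, watcher, nil
}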
Example 1: main
func main() {
    defer exit.Recover()

    flag.Parse()
    servenv.Init()

    if initFakeZK != nil {
        initFakeZK()
    }

    ts := topo.GetServer()
    defer topo.CloseServers()

    resilientSrvTopoServer = vtgate.NewResilientSrvTopoServer(ts, "ResilientSrvTopoServer")

    healthCheck = discovery.NewHealthCheck(*connTimeoutTotal, *healthCheckRetryDelay, *healthCheckTimeout, "" /* statsSuffix */)

    tabletTypes := make([]topodatapb.TabletType, 0, 1)
    if len(*tabletTypesToWait) != 0 {
        for _, ttStr := range strings.Split(*tabletTypesToWait, ",") {
            tt, err := topoproto.ParseTabletType(ttStr)
            if err != nil {
                log.Errorf("unknown tablet type: %v", ttStr)
                continue
            }
            tabletTypes = append(tabletTypes, tt)
        }
    }

    vtg := vtgate.Init(context.Background(), healthCheck, ts, resilientSrvTopoServer, *cell, *retryDelay, *retryCount, *connTimeoutTotal, *connTimeoutPerConn, *connLife, tabletTypes, *maxInFlight, *testGateway)

    servenv.OnRun(func() {
        addStatusParts(vtg)
    })
    servenv.RunDefault()
}
Example 2: startHealthWatchers
// startHealthWatchers launches the topology watchers and health checking to
// monitor all tablets on the shard. It should be called before the schema
// swap process starts.
func (shardSwap *shardSchemaSwap) startHealthWatchers() error {
    shardSwap.tabletHealthCheck = discovery.NewHealthCheck(
        *vtctl.HealthCheckTopologyRefresh, *vtctl.HealthcheckRetryDelay, *vtctl.HealthCheckTimeout)
    shardSwap.tabletHealthCheck.SetListener(shardSwap, true /* sendDownEvents */)

    topoServer := shardSwap.parent.topoServer
    cellList, err := topoServer.GetKnownCells(shardSwap.parent.ctx)
    if err != nil {
        return err
    }
    for _, cell := range cellList {
        watcher := discovery.NewShardReplicationWatcher(
            topoServer,
            shardSwap.tabletHealthCheck,
            cell,
            shardSwap.parent.keyspace,
            shardSwap.shardName,
            *vtctl.HealthCheckTimeout,
            discovery.DefaultTopoReadConcurrency)
        shardSwap.tabletWatchers = append(shardSwap.tabletWatchers, watcher)
    }
    for _, watcher := range shardSwap.tabletWatchers {
        if err := watcher.WaitForInitialTopology(); err != nil {
            return err
        }
    }
    shardSwap.tabletHealthCheck.WaitForInitialStatsUpdates()
    return nil
}
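Example 2 only starts the watchers. A matching teardown, following the watcher.Stop()/healthCheck.Close() pattern visible in Examples 5 and 15, would plausibly look like the sketch below; stopAllHealthWatchers is a hypothetical name, not part of the original example.

// Hypothetical counterpart to startHealthWatchers: stop all watchers,
// then close the healthcheck to release its tablet connections.
func (shardSwap *shardSchemaSwap) stopAllHealthWatchers() {
    for _, watcher := range shardSwap.tabletWatchers {
        watcher.Stop()
    }
    shardSwap.tabletWatchers = nil
    if err := shardSwap.tabletHealthCheck.Close(); err != nil {
        log.Errorf("error closing health checking: %v", err)
    }
}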
Example 3: main
func main() {
    defer exit.Recover()

    flag.Parse()
    servenv.Init()

    ts := topo.GetServer()
    defer topo.CloseServers()

    resilientSrvTopoServer = vtgate.NewResilientSrvTopoServer(ts, "ResilientSrvTopoServer")

    healthCheck = discovery.NewHealthCheck(*healthCheckConnTimeout, *healthCheckRetryDelay, *healthCheckTimeout)
    healthCheck.RegisterStats()

    tabletTypes := make([]topodatapb.TabletType, 0, 1)
    if len(*tabletTypesToWait) != 0 {
        for _, ttStr := range strings.Split(*tabletTypesToWait, ",") {
            tt, err := topoproto.ParseTabletType(ttStr)
            if err != nil {
                log.Errorf("unknown tablet type: %v", ttStr)
                continue
            }
            tabletTypes = append(tabletTypes, tt)
        }
    }

    l2vtg := l2vtgate.Init(healthCheck, ts, resilientSrvTopoServer, *cell, *retryCount, tabletTypes)

    servenv.OnRun(func() {
        addStatusParts(l2vtg)
    })
    servenv.RunDefault()
}
Example 4: newRealtimeStats
func newRealtimeStats(ts topo.Server) (*realtimeStats, error) {
    hc := discovery.NewHealthCheck(*vtctl.HealthCheckTimeout, *vtctl.HealthcheckRetryDelay, *vtctl.HealthCheckTimeout)
    tabletStatsCache := &tabletStatsCache{
        statuses: make(map[string]map[string]*discovery.TabletStats),
    }
    hc.SetListener(tabletStatsCache)
    r := &realtimeStats{
        healthCheck: hc,
        tabletStats: tabletStatsCache,
    }

    // Get the list of all tablets from all cells, and monitor the topology
    // for added or removed tablets with a CellTabletsWatcher.
    cells, err := ts.GetKnownCells(context.Background())
    if err != nil {
        return r, fmt.Errorf("error when getting cells: %v", err)
    }
    var watchers []*discovery.TopologyWatcher
    for _, cell := range cells {
        watcher := discovery.NewCellTabletsWatcher(ts, hc, cell, *vtctl.HealthCheckTopologyRefresh, discovery.DefaultTopoReadConcurrency)
        watchers = append(watchers, watcher)
    }
    r.cellWatchers = watchers
    return r, nil
}
Example 5: FindHealthyRdonlyEndPoint
// FindHealthyRdonlyEndPoint returns a random healthy endpoint.
// Since we don't want to use them all, we require at least
// minHealthyEndPoints servers to be healthy.
// May block up to -wait_for_healthy_rdonly_endpoints_timeout.
func FindHealthyRdonlyEndPoint(ctx context.Context, wr *wrangler.Wrangler, cell, keyspace, shard string) (*topodatapb.TabletAlias, error) {
    busywaitCtx, busywaitCancel := context.WithTimeout(ctx, *WaitForHealthyEndPointsTimeout)
    defer busywaitCancel()

    // Create a discovery healthcheck and wait for it to see at least one
    // rdonly endpoint.
    healthCheck := discovery.NewHealthCheck(*remoteActionsTimeout, *healthcheckRetryDelay, *healthCheckTimeout, "" /* statsSuffix */)
    watcher := discovery.NewShardReplicationWatcher(wr.TopoServer(), healthCheck, cell, keyspace, shard, *healthCheckTopologyRefresh, 5 /* topoReadConcurrency */)
    defer watcher.Stop()
    defer healthCheck.Close()
    if err := discovery.WaitForEndPoints(ctx, healthCheck, cell, keyspace, shard, []topodatapb.TabletType{topodatapb.TabletType_RDONLY}); err != nil {
        return nil, fmt.Errorf("error waiting for rdonly endpoints for (%v,%v/%v): %v", cell, keyspace, shard, err)
    }

    var healthyEndpoints []*topodatapb.EndPoint
    for {
        select {
        case <-busywaitCtx.Done():
            return nil, fmt.Errorf("not enough endpoints to choose from in (%v,%v/%v), have %v healthy ones, need at least %v; context error: %v", cell, keyspace, shard, len(healthyEndpoints), *minHealthyEndPoints, busywaitCtx.Err())
        default:
        }

        addrs := healthCheck.GetEndPointStatsFromTarget(keyspace, shard, topodatapb.TabletType_RDONLY)
        healthyEndpoints = make([]*topodatapb.EndPoint, 0, len(addrs))
        for _, addr := range addrs {
            // Note we do not check the 'Serving' flag here.
            // This is mainly to avoid the case where we run a
            // Diff between a source and destination, and the source
            // is not serving (disabled by TabletControl).
            // When we switch the tablet to 'worker', it will
            // go back to serving state.
            if addr.Stats == nil || addr.Stats.HealthError != "" || addr.Stats.SecondsBehindMaster > 30 {
                continue
            }
            healthyEndpoints = append(healthyEndpoints, addr.EndPoint)
        }
        if len(healthyEndpoints) >= *minHealthyEndPoints {
            break
        }

        deadlineForLog, _ := busywaitCtx.Deadline()
        wr.Logger().Infof("Waiting for enough endpoints to become available: available: %v, required: %v; waiting up to %.1f more seconds.", len(healthyEndpoints), *minHealthyEndPoints, deadlineForLog.Sub(time.Now()).Seconds())
        // Block for 1 second because 2 seconds is the -health_check_interval flag value in integration tests.
        timer := time.NewTimer(1 * time.Second)
        select {
        case <-busywaitCtx.Done():
            timer.Stop()
        case <-timer.C:
        }
    }

    // Any random server in the list will do.
    index := rand.Intn(len(healthyEndpoints))
    return &topodatapb.TabletAlias{
        Cell: cell,
        Uid:  healthyEndpoints[index].Uid,
    }, nil
}
Example 6: main
func main() {
    defer exit.Recover()

    flag.Parse()
    servenv.Init()

    if initFakeZK != nil {
        initFakeZK()
    }

    ts := topo.GetServer()
    defer topo.CloseServers()

    var schema *planbuilder.Schema
    if *schemaFile != "" {
        var err error
        if schema, err = planbuilder.LoadFile(*schemaFile); err != nil {
            log.Error(err)
            exit.Return(1)
        }
        log.Infof("v3 is enabled: loaded schema from file: %v", *schemaFile)
    } else {
        ctx := context.Background()
        schemaJSON, err := ts.GetVSchema(ctx)
        if err != nil {
            log.Warningf("Skipping v3 initialization: GetVSchema failed: %v", err)
            goto startServer
        }
        schema, err = planbuilder.NewSchema([]byte(schemaJSON))
        if err != nil {
            log.Warningf("Skipping v3 initialization: NewSchema failed: %v", err)
            goto startServer
        }
        log.Infof("v3 is enabled: loaded schema from topo")
    }

startServer:
    resilientSrvTopoServer = vtgate.NewResilientSrvTopoServer(ts, "ResilientSrvTopoServer")

    healthCheck = discovery.NewHealthCheck(*connTimeoutTotal, *healthCheckRetryDelay, *healthCheckTimeout, "" /* statsSuffix */)

    tabletTypes := make([]topodatapb.TabletType, 0, 1)
    if len(*tabletTypesToWait) != 0 {
        for _, ttStr := range strings.Split(*tabletTypesToWait, ",") {
            tt, err := topoproto.ParseTabletType(ttStr)
            if err != nil {
                log.Errorf("unknown tablet type: %v", ttStr)
                continue
            }
            tabletTypes = append(tabletTypes, tt)
        }
    }

    vtg := vtgate.Init(healthCheck, ts, resilientSrvTopoServer, schema, *cell, *retryDelay, *retryCount, *connTimeoutTotal, *connTimeoutPerConn, *connLife, tabletTypes, *maxInFlight, *testGateway)

    servenv.OnRun(func() {
        addStatusParts(vtg)
    })
    servenv.RunDefault()
}
Example 7: main
func main() {
    defer exit.Recover()

    // flag parsing
    flags := dbconfigs.AppConfig | dbconfigs.DbaConfig |
        dbconfigs.FilteredConfig | dbconfigs.ReplConfig
    dbconfigs.RegisterFlags(flags)
    mysqlctl.RegisterFlags()
    flag.Parse()
    if len(flag.Args()) > 0 {
        flag.Usage()
        log.Errorf("vtcombo doesn't take any positional arguments")
        exit.Return(1)
    }

    // register the topo server
    topo.RegisterServer("fakezk", zktopo.NewServer(fakezk.NewConn()))
    ts := topo.GetServerByName("fakezk")

    servenv.Init()

    // database configs
    mycnf, err := mysqlctl.NewMycnfFromFlags(0)
    if err != nil {
        log.Errorf("mycnf read failed: %v", err)
        exit.Return(1)
    }
    dbcfgs, err := dbconfigs.Init(mycnf.SocketFile, flags)
    if err != nil {
        log.Warning(err)
    }
    mysqld := mysqlctl.NewMysqld("Dba", "App", mycnf, &dbcfgs.Dba, &dbcfgs.App.ConnParams, &dbcfgs.Repl)

    // tablets configuration and init
    binlog.RegisterUpdateStreamService(mycnf)
    initTabletMap(ts, *topology, mysqld, dbcfgs, mycnf)

    // vtgate configuration and init
    resilientSrvTopoServer := vtgate.NewResilientSrvTopoServer(ts, "ResilientSrvTopoServer")
    healthCheck := discovery.NewHealthCheck(30*time.Second /* connTimeoutTotal */, 1*time.Millisecond /* retryDelay */)
    vtgate.Init(healthCheck, ts, resilientSrvTopoServer, nil /* schema */, cell, 1*time.Millisecond /* retryDelay */, 2 /* retryCount */, 30*time.Second /* connTimeoutTotal */, 10*time.Second /* connTimeoutPerConn */, 365*24*time.Hour /* connLife */, 0 /* maxInFlight */, "" /* testGateway */)

    servenv.OnTerm(func() {
        // FIXME(alainjobart) stop vtgate, all tablets
        // qsc.DisallowQueries()
        // agent.Stop()
    })
    servenv.OnClose(func() {
        // We still use the topo server during the lameduck period to
        // update our state, so close it only in OnClose().
        topo.CloseServers()
    })
    servenv.RunDefault()
}
Example 8: TestGRPCDiscovery
// TestGRPCDiscovery tests the discovery gateway with a gRPC
// connection from the gateway to the fake tablet.
func TestGRPCDiscovery(t *testing.T) {
    flag.Set("tablet_protocol", "grpc")
    flag.Set("gateway_implementation", "discoverygateway")

    // Fake services for the tablet and the topo server.
    service, ts, cell := CreateFakeServers(t)

    // Tablet: listen on a random port.
    listener, err := net.Listen("tcp", ":0")
    if err != nil {
        t.Fatalf("Cannot listen: %v", err)
    }
    host := listener.Addr().(*net.TCPAddr).IP.String()
    port := listener.Addr().(*net.TCPAddr).Port
    defer listener.Close()

    // Tablet: create a gRPC server and listen on the port.
    server := grpc.NewServer()
    grpcqueryservice.Register(server, service)
    go server.Serve(listener)
    defer server.Stop()

    // VTGate: create the discovery healthcheck and the gateway,
    // then wait for the right tablets to be present.
    hc := discovery.NewHealthCheck(30*time.Second, 10*time.Second, 2*time.Minute)
    dg := gateway.GetCreator()(hc, ts, ts, cell, 2)
    hc.AddTablet(&topodatapb.Tablet{
        Alias: &topodatapb.TabletAlias{
            Cell: cell,
            Uid:  43,
        },
        Keyspace: tabletconntest.TestTarget.Keyspace,
        Shard:    tabletconntest.TestTarget.Shard,
        Type:     tabletconntest.TestTarget.TabletType,
        Hostname: host,
        PortMap: map[string]int32{
            "grpc": int32(port),
        },
    }, "test_tablet")
    err = gateway.WaitForTablets(dg, []topodatapb.TabletType{tabletconntest.TestTarget.TabletType})
    if err != nil {
        t.Fatalf("WaitForTablets failed: %v", err)
    }
    defer dg.Close(context.Background())

    // Run the test suite.
    TestSuite(t, "discovery-grpc", dg, service)

    // Run it again with vtgate combining Begin and Execute.
    flag.Set("tablet_grpc_combine_begin_execute", "true")
    TestSuite(t, "discovery-grpc-combo", dg, service)
}
Example 9: init
// init phase:
// - read the destination keyspace, make sure it has 'servedFrom' values
func (scw *SplitCloneWorker) init(ctx context.Context) error {
    scw.setState(WorkerStateInit)

    // Read the keyspace and validate it.
    shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
    var err error
    scw.destinationKeyspaceInfo, err = scw.wr.TopoServer().GetKeyspace(shortCtx, scw.destinationKeyspace)
    cancel()
    if err != nil {
        return fmt.Errorf("cannot read (destination) keyspace %v: %v", scw.destinationKeyspace, err)
    }

    // Set the source and destination shard infos.
    switch scw.cloneType {
    case horizontalResharding:
        if err := scw.initShardsForHorizontalResharding(ctx); err != nil {
            return err
        }
    case verticalSplit:
        if err := scw.initShardsForVerticalSplit(ctx); err != nil {
            return err
        }
    }
    if err := scw.sanityCheckShardInfos(); err != nil {
        return err
    }
    if scw.cloneType == horizontalResharding {
        if err := scw.loadVSchema(ctx); err != nil {
            return err
        }
    }

    // Initialize the healthcheck and add the destination shards to it.
    scw.healthCheck = discovery.NewHealthCheck(*remoteActionsTimeout, *healthcheckRetryDelay, *healthCheckTimeout)
    scw.tsc = discovery.NewTabletStatsCacheDoNotSetListener(scw.cell)
    // We set sendDownEvents=true because it's required by TabletStatsCache.
    scw.healthCheck.SetListener(scw, true /* sendDownEvents */)

    // Start watchers to get tablets added to the healthcheck automatically.
    allShards := append(scw.sourceShards, scw.destinationShards...)
    for _, si := range allShards {
        watcher := discovery.NewShardReplicationWatcher(scw.wr.TopoServer(), scw.healthCheck,
            scw.cell, si.Keyspace(), si.ShardName(),
            *healthCheckTopologyRefresh, discovery.DefaultTopoReadConcurrency)
        scw.shardWatchers = append(scw.shardWatchers, watcher)
    }
    return nil
}
Example 10: main
func main() {
    defer exit.Recover()

    flag.Parse()
    servenv.Init()

    if initFakeZK != nil {
        initFakeZK()
    }

    ts := topo.GetServer()
    defer topo.CloseServers()

    var schema *planbuilder.Schema
    if *schemaFile != "" {
        var err error
        if schema, err = planbuilder.LoadFile(*schemaFile); err != nil {
            log.Error(err)
            exit.Return(1)
        }
        log.Infof("v3 is enabled: loaded schema from file: %v", *schemaFile)
    } else {
        ctx := context.Background()
        schemaJSON, err := ts.GetVSchema(ctx)
        if err != nil {
            log.Warningf("Skipping v3 initialization: GetVSchema failed: %v", err)
            goto startServer
        }
        schema, err = planbuilder.NewSchema([]byte(schemaJSON))
        if err != nil {
            log.Warningf("Skipping v3 initialization: NewSchema failed: %v", err)
            goto startServer
        }
        log.Infof("v3 is enabled: loaded schema from topo")
    }

startServer:
    resilientSrvTopoServer = vtgate.NewResilientSrvTopoServer(ts, "ResilientSrvTopoServer")

    // For the initial phase, vtgate exposes the topoReader API.
    // This will be subsumed by vtgate once vtgate's client
    // functions become active.
    topoReader = NewTopoReader(resilientSrvTopoServer)
    servenv.Register("toporeader", topoReader)

    healthCheck = discovery.NewHealthCheck(*connTimeoutTotal, *healthCheckRetryDelay)
    vtgate.Init(healthCheck, ts, resilientSrvTopoServer, schema, *cell, *retryDelay, *retryCount, *connTimeoutTotal, *connTimeoutPerConn, *connLife, *maxInFlight, *testGateway)
    servenv.RunDefault()
}
Example 11: newBinlogPlayerController
func newBinlogPlayerController(ts topo.Server, vtClientFactory func() binlogplayer.VtClient, mysqld mysqlctl.MysqlDaemon, cell string, keyRange *topodatapb.KeyRange, sourceShard *topodatapb.Shard_SourceShard, dbName string) *BinlogPlayerController {
    blc := &BinlogPlayerController{
        ts:                ts,
        vtClientFactory:   vtClientFactory,
        mysqld:            mysqld,
        cell:              cell,
        keyRange:          keyRange,
        dbName:            dbName,
        sourceShard:       sourceShard,
        binlogPlayerStats: binlogplayer.NewStats(),
        healthCheck:       discovery.NewHealthCheck(*binlogplayer.BinlogPlayerConnTimeout, *retryDelay, *healthCheckTimeout),
    }
    blc.shardReplicationWatcher = discovery.NewShardReplicationWatcher(ts, blc.healthCheck, cell, sourceShard.Keyspace, sourceShard.Shard, *healthCheckTopologyRefresh, 5)
    return blc
}
Example 12: newClient
func newClient(master *master, replica *replica) *client {
    t, err := throttler.NewThrottler("client", "TPS", 1, throttler.MaxRateModuleDisabled, 5 /* seconds */)
    if err != nil {
        log.Fatal(err)
    }

    healthCheck := discovery.NewHealthCheck(1*time.Minute, 5*time.Second, 1*time.Minute)
    c := &client{
        master:      master,
        healthCheck: healthCheck,
        throttler:   t,
        stopChan:    make(chan struct{}),
    }
    c.healthCheck.SetListener(c, false /* sendDownEvents */)
    c.healthCheck.AddTablet(replica.fakeTablet.Tablet, "name")
    return c
}
Example 13: newBinlogPlayerController
func newBinlogPlayerController(ts topo.Server, vtClientFactory func() binlogplayer.VtClient, mysqld mysqlctl.MysqlDaemon, cell string, keyspaceIDType pb.KeyspaceIdType, keyRange *pb.KeyRange, sourceShard *pb.Shard_SourceShard, dbName string) *BinlogPlayerController {
    blc := &BinlogPlayerController{
        ts:                   ts,
        vtClientFactory:      vtClientFactory,
        mysqld:               mysqld,
        cell:                 cell,
        keyspaceIDType:       keyspaceIDType,
        keyRange:             keyRange,
        dbName:               dbName,
        sourceShard:          sourceShard,
        binlogPlayerStats:    binlogplayer.NewBinlogPlayerStats(),
        healthCheck:          discovery.NewHealthCheck(*binlogplayer.BinlogPlayerConnTimeout, *retryDelay),
        initialEndpointFound: make(chan struct{}),
    }
    blc.healthCheck.SetListener(blc)
    blc.shardReplicationWatcher = discovery.NewShardReplicationWatcher(ts, blc.healthCheck, cell, sourceShard.Keyspace, sourceShard.Shard, *healthcheckTopologyRefresh, 5)
    return blc
}
Example 14: FindHealthyRdonlyTablet
// FindHealthyRdonlyTablet returns a random healthy RDONLY tablet.
// Since we don't want to use them all, we require at least
// minHealthyRdonlyTablets servers to be healthy.
// May block up to -wait_for_healthy_rdonly_tablets_timeout.
func FindHealthyRdonlyTablet(ctx context.Context, wr *wrangler.Wrangler, healthCheck discovery.HealthCheck, cell, keyspace, shard string, minHealthyRdonlyTablets int) (*topodatapb.TabletAlias, error) {
    if healthCheck == nil {
        // No healthcheck instance was provided. Create one.
        healthCheck = discovery.NewHealthCheck(*remoteActionsTimeout, *healthcheckRetryDelay, *healthCheckTimeout)
        watcher := discovery.NewShardReplicationWatcher(wr.TopoServer(), healthCheck, cell, keyspace, shard, *healthCheckTopologyRefresh, discovery.DefaultTopoReadConcurrency)
        defer watcher.Stop()
        defer healthCheck.Close()
    }

    healthyTablets, err := waitForHealthyRdonlyTablets(ctx, wr, healthCheck, cell, keyspace, shard, minHealthyRdonlyTablets, *waitForHealthyTabletsTimeout)
    if err != nil {
        return nil, err
    }

    // Any random server in the list will do.
    index := rand.Intn(len(healthyTablets))
    return healthyTablets[index].Tablet.Alias, nil
}
Example 15: newBinlogPlayerController
// newBinlogPlayerController instantiates a new BinlogPlayerController.
// Use Start() and Stop() to start and stop it.
// Once stopped, you should call Close() to stop and free resources, e.g. the
// healthcheck instance.
func newBinlogPlayerController(ts topo.Server, vtClientFactory func() binlogplayer.VtClient, mysqld mysqlctl.MysqlDaemon, cell string, keyRange *topodatapb.KeyRange, sourceShard *topodatapb.Shard_SourceShard, dbName string) *BinlogPlayerController {
    healthCheck := discovery.NewHealthCheck(*binlogplayer.BinlogPlayerConnTimeout, *healthcheckRetryDelay, *healthCheckTimeout)
    return &BinlogPlayerController{
        ts:                ts,
        vtClientFactory:   vtClientFactory,
        mysqld:            mysqld,
        cell:              cell,
        keyRange:          keyRange,
        dbName:            dbName,
        sourceShard:       sourceShard,
        binlogPlayerStats: binlogplayer.NewStats(),
        // Note: healthCheck and shardReplicationWatcher remain active independent
        // of whether the BinlogPlayerController is Start()'d or Stop()'d.
        // Use Close() after Stop() to finally close them and free their resources.
        healthCheck:             healthCheck,
        shardReplicationWatcher: discovery.NewShardReplicationWatcher(ts, healthCheck, cell, sourceShard.Keyspace, sourceShard.Shard, *healthCheckTopologyRefresh, discovery.DefaultTopoReadConcurrency),
    }
}
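As the comment in Example 15 notes, the healthcheck and the shard replication watcher are created once in the constructor and stay active across Start()/Stop() cycles, so the caller must invoke Close() to release them. Examples 11, 13, and 15 show the same constructor at different revisions, which is also why the NewHealthCheck parameter list differs between them (two arguments in Example 13, three in Examples 11 and 15).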