本文整理汇总了Golang中github.com/tsuru/tsuru/api/shutdown.Register函数的典型用法代码示例。如果您正苦于以下问题:Golang Register函数的具体用法?Golang Register怎么用?Golang Register使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了Register函数的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: Initialize
// Initialize creates the global node healer instance (HealerInstance) from
// the "docker:healing" configuration section and registers it for graceful
// shutdown. It returns (nil, nil) when node auto-healing is disabled via
// docker:healing:heal-nodes, and an error if a healer already exists.
func Initialize() (*NodeHealer, error) {
	if HealerInstance != nil {
		// Fixed typo in the error message: "alread" -> "already".
		return nil, errors.New("healer already initialized")
	}
	// heal-nodes defaults to enabled when the config entry is absent.
	autoHealingNodes, err := config.GetBool("docker:healing:heal-nodes")
	if err != nil {
		autoHealingNodes = true
	}
	if !autoHealingNodes {
		return nil, nil
	}
	// Non-positive or missing values fall back to defaults:
	// disabled-time 30s, max-failures 5, wait-new-time 5 minutes.
	disabledSeconds, _ := config.GetInt("docker:healing:disabled-time")
	if disabledSeconds <= 0 {
		disabledSeconds = 30
	}
	maxFailures, _ := config.GetInt("docker:healing:max-failures")
	if maxFailures <= 0 {
		maxFailures = 5
	}
	waitSecondsNewMachine, _ := config.GetInt("docker:healing:wait-new-time")
	if waitSecondsNewMachine <= 0 {
		waitSecondsNewMachine = 5 * 60
	}
	HealerInstance = newNodeHealer(nodeHealerArgs{
		DisabledTime:          time.Duration(disabledSeconds) * time.Second,
		WaitTimeNewMachine:    time.Duration(waitSecondsNewMachine) * time.Second,
		FailuresBeforeHealing: maxFailures,
	})
	// Register so the healer is stopped cleanly on server shutdown.
	shutdown.Register(HealerInstance)
	return HealerInstance, nil
}
示例2: Queue
// Queue lazily creates and returns the process-wide queue instance backed by
// MongoDB. Connection parameters come from the queue:mongo-url and
// queue:mongo-database config entries; the URL defaults to localhost:27017.
// The instance is created at most once (double-checked locking on queueData)
// and its processing loop is started in a background goroutine.
func Queue() (monsterqueue.Queue, error) {
	// Fast path: the instance already exists, only a read lock is needed.
	queueData.RLock()
	if inst := queueData.instance; inst != nil {
		queueData.RUnlock()
		return inst, nil
	}
	queueData.RUnlock()
	// Slow path: take the write lock and re-check before creating.
	queueData.Lock()
	defer queueData.Unlock()
	if queueData.instance != nil {
		return queueData.instance, nil
	}
	mongoURL, _ := config.GetString("queue:mongo-url")
	if mongoURL == "" {
		mongoURL = "localhost:27017"
	}
	dbName, _ := config.GetString("queue:mongo-database")
	instance, err := mongodb.NewQueue(mongodb.QueueConfig{
		CollectionPrefix: "tsuru",
		Url:              mongoURL,
		Database:         dbName,
	})
	if err != nil {
		return nil, fmt.Errorf("could not create queue instance, please check queue:mongo-url and queue:mongo-database config entries. error: %s", err)
	}
	queueData.instance = instance
	// Stop the queue cleanly on server shutdown.
	shutdown.Register(&queueData)
	go instance.ProcessLoop()
	return instance, nil
}
示例3: Queue
// Queue lazily creates and returns the process-wide queue instance backed by
// MongoDB, using the queue:mongo-url, queue:mongo-database and
// queue:mongo-polling-interval config entries (URL defaults to
// localhost:27017, polling interval to 1 second). Creation happens at most
// once via double-checked locking; the processing loop runs in a goroutine.
func Queue() (monsterqueue.Queue, error) {
	// First check under the read lock to keep the common path cheap.
	queueData.RLock()
	existing := queueData.instance
	queueData.RUnlock()
	if existing != nil {
		return existing, nil
	}
	queueData.Lock()
	defer queueData.Unlock()
	// Another goroutine may have won the race between the two locks.
	if queueData.instance != nil {
		return queueData.instance, nil
	}
	url, _ := config.GetString("queue:mongo-url")
	if url == "" {
		url = "localhost:27017"
	}
	database, _ := config.GetString("queue:mongo-database")
	// Only an exact zero triggers the default; other values pass through.
	interval, _ := config.GetFloat("queue:mongo-polling-interval")
	if interval == 0.0 {
		interval = 1.0
	}
	q, err := mongodb.NewQueue(mongodb.QueueConfig{
		CollectionPrefix: "tsuru",
		Url:              url,
		Database:         database,
		PollingInterval:  time.Duration(interval * float64(time.Second)),
	})
	if err != nil {
		return nil, errors.Wrap(err, "could not create queue instance, please check queue:mongo-url and queue:mongo-database config entries. error")
	}
	queueData.instance = q
	// Stop the queue cleanly on server shutdown.
	shutdown.Register(&queueData)
	go q.ProcessLoop()
	return q, nil
}
示例4: initDockerCluster
// initDockerCluster wires up the provisioner's docker cluster: storage,
// segregated scheduler, optional node/container healing, active monitoring
// and auto scaling. Most "docker:*" config entries are optional with
// defaults; docker:collection is required when collectionName is unset.
func (p *dockerProvisioner) initDockerCluster() error {
	debug, _ := config.GetBool("debug")
	clusterLog.SetDebug(debug)
	clusterLog.SetLogger(log.GetStdLogger())
	var err error
	if p.storage == nil {
		p.storage, err = buildClusterStorage()
		if err != nil {
			return err
		}
	}
	if p.collectionName == "" {
		var name string
		name, err = config.GetString("docker:collection")
		if err != nil {
			// docker:collection is mandatory; fail fast when absent.
			return err
		}
		p.collectionName = name
	}
	// NOTE(review): nodes is always empty here; cluster.New presumably
	// loads the node list from p.storage — confirm against cluster docs.
	var nodes []cluster.Node
	TotalMemoryMetadata, _ := config.GetString("docker:scheduler:total-memory-metadata")
	maxUsedMemory, _ := config.GetFloat("docker:scheduler:max-used-memory")
	p.scheduler = &segregatedScheduler{
		maxMemoryRatio:      float32(maxUsedMemory),
		TotalMemoryMetadata: TotalMemoryMetadata,
		provisioner:         p,
	}
	p.cluster, err = cluster.New(p.scheduler, p.storage, nodes...)
	if err != nil {
		return err
	}
	p.cluster.Hook = &bs.ClusterHook{Provisioner: p}
	// Node auto-healing is opt-in via docker:healing:heal-nodes.
	autoHealingNodes, _ := config.GetBool("docker:healing:heal-nodes")
	if autoHealingNodes {
		// Non-positive or missing values fall back to defaults:
		// disabled-time 30s, max-failures 5, wait-new-time 5 minutes.
		disabledSeconds, _ := config.GetInt("docker:healing:disabled-time")
		if disabledSeconds <= 0 {
			disabledSeconds = 30
		}
		maxFailures, _ := config.GetInt("docker:healing:max-failures")
		if maxFailures <= 0 {
			maxFailures = 5
		}
		waitSecondsNewMachine, _ := config.GetInt("docker:healing:wait-new-time")
		if waitSecondsNewMachine <= 0 {
			waitSecondsNewMachine = 5 * 60
		}
		nodeHealer := healer.NewNodeHealer(healer.NodeHealerArgs{
			Provisioner:           p,
			DisabledTime:          time.Duration(disabledSeconds) * time.Second,
			WaitTimeNewMachine:    time.Duration(waitSecondsNewMachine) * time.Second,
			FailuresBeforeHealing: maxFailures,
		})
		// Registered for graceful shutdown alongside the cluster.
		shutdown.Register(nodeHealer)
		p.cluster.Healer = nodeHealer
	}
	// Container healing runs only when an explicit timeout (> 0) is set.
	healContainersSeconds, _ := config.GetInt("docker:healing:heal-containers-timeout")
	if healContainersSeconds > 0 {
		contHealerInst := healer.NewContainerHealer(healer.ContainerHealerArgs{
			Provisioner:         p,
			MaxUnresponsiveTime: time.Duration(healContainersSeconds) * time.Second,
			Done:                make(chan bool),
			Locker:              &appLocker{},
		})
		shutdown.Register(contHealerInst)
		// The healer loop runs until shutdown in its own goroutine.
		go contHealerInst.RunContainerHealer()
	}
	// Periodic node liveness checks, enabled by a positive interval.
	activeMonitoring, _ := config.GetInt("docker:healing:active-monitoring-interval")
	if activeMonitoring > 0 {
		p.cluster.StartActiveMonitoring(time.Duration(activeMonitoring) * time.Second)
	}
	autoScale := p.initAutoScaleConfig()
	if autoScale.Enabled {
		shutdown.Register(autoScale)
		go autoScale.run()
	}
	return nil
}
示例5: RunServer
//.........这里部分代码省略.........
m.Add("Get", "/debug/pprof/threadcreate", AdminRequiredHandler(indexHandler))
m.Add("Get", "/debug/pprof/block", AdminRequiredHandler(indexHandler))
n := negroni.New()
n.Use(negroni.NewRecovery())
n.Use(newLoggerMiddleware())
n.UseHandler(m)
n.Use(negroni.HandlerFunc(contextClearerMiddleware))
n.Use(negroni.HandlerFunc(flushingWriterMiddleware))
n.Use(negroni.HandlerFunc(errorHandlingMiddleware))
n.Use(negroni.HandlerFunc(setVersionHeadersMiddleware))
n.Use(negroni.HandlerFunc(authTokenMiddleware))
n.Use(&appLockMiddleware{excludedHandlers: []http.Handler{
logPostHandler,
runHandler,
forceDeleteLockHandler,
registerUnitHandler,
setUnitStatusHandler,
}})
n.UseHandler(http.HandlerFunc(runDelayedHandler))
if !dry {
var startupMessage string
routers, err := router.List()
if err != nil {
fatal(err)
}
for _, routerDesc := range routers {
var r router.Router
r, err = router.Get(routerDesc.Name)
if err != nil {
fatal(err)
}
fmt.Printf("Registered router %q", routerDesc.Name)
if messageRouter, ok := r.(router.MessageRouter); ok {
startupMessage, err = messageRouter.StartupMessage()
if err == nil && startupMessage != "" {
fmt.Printf(": %s", startupMessage)
}
}
fmt.Println()
}
defaultRouter, _ := config.GetString("docker:router")
fmt.Printf("Default router is %q.\n", defaultRouter)
repoManager, err := config.GetString("repo-manager")
if err != nil {
repoManager = "gandalf"
fmt.Println("Warning: configuration didn't declare a repository manager, using default manager.")
}
fmt.Printf("Using %q repository manager.\n", repoManager)
provisioner, err := getProvisioner()
if err != nil {
fmt.Println("Warning: configuration didn't declare a provisioner, using default provisioner.")
}
app.Provisioner, err = provision.Get(provisioner)
if err != nil {
fatal(err)
}
fmt.Printf("Using %q provisioner.\n", provisioner)
if initializableProvisioner, ok := app.Provisioner.(provision.InitializableProvisioner); ok {
err = initializableProvisioner.Initialize()
if err != nil {
fatal(err)
}
}
if messageProvisioner, ok := app.Provisioner.(provision.MessageProvisioner); ok {
示例6: initDockerCluster
// initDockerCluster wires up the provisioner's docker cluster: storage,
// segregated scheduler, optional TLS client credentials, healing hooks,
// active monitoring, auto scaling and the per-host action limiter. Most
// "docker:*" config entries are optional with defaults; docker:collection
// is required when collectionName is unset.
func (p *dockerProvisioner) initDockerCluster() error {
	debug, _ := config.GetBool("debug")
	clusterLog.SetDebug(debug)
	clusterLog.SetLogger(log.GetStdLogger())
	var err error
	if p.storage == nil {
		p.storage, err = buildClusterStorage()
		if err != nil {
			return err
		}
	}
	if p.collectionName == "" {
		var name string
		name, err = config.GetString("docker:collection")
		if err != nil {
			// docker:collection is mandatory; fail fast when absent.
			return err
		}
		p.collectionName = name
	}
	// NOTE(review): nodes is always empty here; cluster.New presumably
	// loads the node list from p.storage — confirm against cluster docs.
	var nodes []cluster.Node
	TotalMemoryMetadata, _ := config.GetString("docker:scheduler:total-memory-metadata")
	maxUsedMemory, _ := config.GetFloat("docker:scheduler:max-used-memory")
	p.scheduler = &segregatedScheduler{
		maxMemoryRatio:      float32(maxUsedMemory),
		TotalMemoryMetadata: TotalMemoryMetadata,
		provisioner:         p,
	}
	// Optional TLS material for talking to docker nodes: when
	// docker:tls:root-path is set, all three files (ca.pem, cert.pem,
	// key.pem) must exist in that directory or initialization fails.
	caPath, _ := config.GetString("docker:tls:root-path")
	if caPath != "" {
		p.caCert, err = ioutil.ReadFile(filepath.Join(caPath, "ca.pem"))
		if err != nil {
			return err
		}
		p.clientCert, err = ioutil.ReadFile(filepath.Join(caPath, "cert.pem"))
		if err != nil {
			return err
		}
		p.clientKey, err = ioutil.ReadFile(filepath.Join(caPath, "key.pem"))
		if err != nil {
			return err
		}
	}
	p.cluster, err = cluster.New(p.scheduler, p.storage, nodes...)
	if err != nil {
		return err
	}
	p.cluster.AddHook(cluster.HookEventBeforeContainerCreate, &internalNodeContainer.ClusterHook{Provisioner: p})
	// Node healing is attached only when the global healer instance was
	// initialized elsewhere (see tsuruHealer package).
	if tsuruHealer.HealerInstance != nil {
		healer := hookHealer{p: p}
		p.cluster.Healer = healer
		p.cluster.AddHook(cluster.HookEventBeforeNodeUnregister, healer)
	}
	// Container healing runs only when an explicit timeout (> 0) is set.
	healContainersSeconds, _ := config.GetInt("docker:healing:heal-containers-timeout")
	if healContainersSeconds > 0 {
		contHealerInst := healer.NewContainerHealer(healer.ContainerHealerArgs{
			Provisioner:         p,
			MaxUnresponsiveTime: time.Duration(healContainersSeconds) * time.Second,
			Done:                make(chan bool),
			Locker:              &appLocker{},
		})
		shutdown.Register(contHealerInst)
		// The healer loop runs until shutdown in its own goroutine.
		go contHealerInst.RunContainerHealer()
	}
	// Periodic node liveness checks, enabled by a positive interval.
	activeMonitoring, _ := config.GetInt("docker:healing:active-monitoring-interval")
	if activeMonitoring > 0 {
		p.cluster.StartActiveMonitoring(time.Duration(activeMonitoring) * time.Second)
	}
	autoScale := p.initAutoScaleConfig()
	if autoScale.Enabled {
		shutdown.Register(autoScale)
		go autoScale.run()
	}
	// Action limiter: "global" uses the mongodb-backed limiter (presumably
	// shared across API servers — confirm); any other mode limits only
	// within this process.
	limitMode, _ := config.GetString("docker:limit:mode")
	if limitMode == "global" {
		p.actionLimiter = &provision.MongodbLimiter{}
	} else {
		p.actionLimiter = &provision.LocalLimiter{}
	}
	// A zero/absent limit leaves the limiter uninitialized (no limiting).
	actionLimit, _ := config.GetUint("docker:limit:actions-per-host")
	if actionLimit > 0 {
		p.actionLimiter.Initialize(actionLimit)
	}
	return nil
}
示例7: startServer
func startServer(handler http.Handler) {
shutdownChan := make(chan bool)
shutdownTimeout, _ := config.GetInt("shutdown-timeout")
if shutdownTimeout == 0 {
shutdownTimeout = 10 * 60
}
idleTracker := newIdleTracker()
shutdown.Register(idleTracker)
shutdown.Register(&logTracker)
readTimeout, _ := config.GetInt("server:read-timeout")
writeTimeout, _ := config.GetInt("server:write-timeout")
listen, err := config.GetString("listen")
if err != nil {
fatal(err)
}
srv := &graceful.Server{
Timeout: time.Duration(shutdownTimeout) * time.Second,
Server: &http.Server{
ReadTimeout: time.Duration(readTimeout) * time.Second,
WriteTimeout: time.Duration(writeTimeout) * time.Second,
Addr: listen,
Handler: handler,
},
ConnState: func(conn net.Conn, state http.ConnState) {
idleTracker.trackConn(conn, state)
},
NoSignalHandling: true,
ShutdownInitiated: func() {
fmt.Println("tsuru is shutting down, waiting for pending connections to finish.")
handlers := shutdown.All()
wg := sync.WaitGroup{}
for _, h := range handlers {
wg.Add(1)
go func(h shutdown.Shutdownable) {
defer wg.Done()
fmt.Printf("running shutdown handler for %v...\n", h)
h.Shutdown()
fmt.Printf("running shutdown handler for %v. DONE.\n", h)
}(h)
}
wg.Wait()
close(shutdownChan)
},
}
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
go func() {
<-sigChan
srv.Stop(srv.Timeout)
}()
var startupMessage string
routers, err := router.List()
if err != nil {
fatal(err)
}
for _, routerDesc := range routers {
var r router.Router
r, err = router.Get(routerDesc.Name)
if err != nil {
fatal(err)
}
fmt.Printf("Registered router %q", routerDesc.Name)
if messageRouter, ok := r.(router.MessageRouter); ok {
startupMessage, err = messageRouter.StartupMessage()
if err == nil && startupMessage != "" {
fmt.Printf(": %s", startupMessage)
}
}
fmt.Println()
}
defaultRouter, _ := config.GetString("docker:router")
fmt.Printf("Default router is %q.\n", defaultRouter)
repoManager, err := config.GetString("repo-manager")
if err != nil {
repoManager = "gandalf"
fmt.Println("Warning: configuration didn't declare a repository manager, using default manager.")
}
fmt.Printf("Using %q repository manager.\n", repoManager)
err = rebuild.RegisterTask(appFinder)
if err != nil {
fatal(err)
}
scheme, err := getAuthScheme()
if err != nil {
fmt.Printf("Warning: configuration didn't declare auth:scheme, using default scheme.\n")
}
app.AuthScheme, err = auth.GetScheme(scheme)
if err != nil {
fatal(err)
}
fmt.Printf("Using %q auth scheme.\n", scheme)
err = provision.InitializeAll()
if err != nil {
fatal(err)
}
_, err = healer.Initialize()
if err != nil {
fatal(err)
}
fmt.Println("Checking components status:")
//.........这里部分代码省略.........