This article collects typical usage examples of the Golang function GetBool from the github.com/tsuru/config package. If you have been wondering what GetBool does, how to call it, or what real-world usage looks like, the curated code examples below should help.
The following shows 15 code examples of the GetBool function, sorted by popularity by default.
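As a quick orientation before the examples: GetBool returns the boolean value and an error, and callers typically either discard the error and rely on the zero value false, or use the error to fall back to a default. The sketch below illustrates both patterns; the configuration keys are made up for illustration and only config.Set, config.GetBool, and the standard library are used.

package main

import (
	"fmt"

	"github.com/tsuru/config"
)

func main() {
	// Seed a value in memory; real code usually loads a YAML file with
	// config.ReadConfigFile instead. The keys here are illustrative only.
	config.Set("feature:enabled", true)

	// Pattern 1: discard the error and rely on the zero value (false)
	// when the key is missing or not a boolean.
	enabled, _ := config.GetBool("feature:enabled")
	fmt.Println(enabled) // true

	// Pattern 2: treat a missing key as an explicit default.
	verbose, err := config.GetBool("feature:verbose")
	if err != nil {
		verbose = true // "feature:verbose" is not set, so default to true
	}
	fmt.Println(verbose) // true
}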
Example 1: dockerCluster
func dockerCluster() *cluster.Cluster {
cmutex.Lock()
defer cmutex.Unlock()
if dCluster == nil {
debug, _ := config.GetBool("debug")
clusterLog.SetDebug(debug)
clusterLog.SetLogger(log.GetStdLogger())
clusterStorage, err := buildClusterStorage()
if err != nil {
panic(err.Error())
}
var nodes []cluster.Node
if isSegregateScheduler() {
dCluster, _ = cluster.New(&segregatedScheduler{}, clusterStorage)
} else {
nodes = getDockerServers()
dCluster, _ = cluster.New(nil, clusterStorage, nodes...)
}
autoHealing, _ := config.GetBool("docker:auto-healing")
if autoHealing {
healer := Healer{}
dCluster.SetHealer(&healer)
}
activeMonitoring, _ := config.GetBool("docker:active-monitoring")
if activeMonitoring {
dCluster.StartActiveMonitoring(1 * time.Minute)
}
}
return dCluster
}
Example 2: createUser
func createUser(w http.ResponseWriter, r *http.Request) error {
registrationEnabled, _ := config.GetBool("auth:user-registration")
if !registrationEnabled {
token := r.Header.Get("Authorization")
t, err := app.AuthScheme.Auth(token)
if err != nil {
return createDisabledErr
}
user, err := t.User()
if err != nil {
return createDisabledErr
}
if !user.IsAdmin() {
return createDisabledErr
}
}
var u auth.User
err := json.NewDecoder(r.Body).Decode(&u)
if err != nil {
return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
}
_, err = app.AuthScheme.Create(&u)
if err != nil {
return handleAuthError(err)
}
rec.Log(u.Email, "create-user")
w.WriteHeader(http.StatusCreated)
return nil
}
Example 3: TestGetS3Endpoint
func (s *S) TestGetS3Endpoint(c *gocheck.C) {
oldRegion, _ := config.Get("aws:s3:region-name")
defer config.Set("aws:s3:region-name", oldRegion)
config.Set("aws:s3:region-name", "myregion")
edp, err := config.GetString("aws:s3:endpoint")
c.Assert(err, gocheck.IsNil)
locConst, err := config.GetBool("aws:s3:location-constraint")
c.Assert(err, gocheck.IsNil)
lwrCaseBucket, err := config.GetBool("aws:s3:lowercase-bucket")
c.Assert(err, gocheck.IsNil)
s3 := getS3Endpoint()
c.Assert(s3.S3Endpoint, gocheck.Equals, edp)
c.Assert(s3.S3LocationConstraint, gocheck.Equals, locConst)
c.Assert(s3.S3LowercaseBucket, gocheck.Equals, lwrCaseBucket)
c.Assert(s3.Region.Name, gocheck.Equals, "myregion")
}
Example 4: elbSupport
func (p *JujuProvisioner) elbSupport() bool {
if p.elb == nil {
elb, _ := config.GetBool("juju:use-elb")
p.elb = &elb
}
return *p.elb
}
Example 5: checkInstances
func (h elbInstanceHealer) checkInstances(names []string) ([]elbInstance, error) {
if elbSupport, _ := config.GetBool("juju:use-elb"); !elbSupport {
return nil, nil
}
lbs, err := h.describeLoadBalancers(names)
if err != nil {
return nil, err
}
var unhealthy []elbInstance
description := "Instance has failed at least the UnhealthyThreshold number of health checks consecutively."
state := "OutOfService"
reasonCode := "Instance"
for _, lb := range lbs {
instances, err := h.describeInstancesHealth(lb)
if err != nil {
return nil, err
}
for _, instance := range instances {
if instance.description == description &&
instance.state == state &&
instance.reasonCode == reasonCode {
unhealthy = append(unhealthy, instance)
}
}
}
log.Debugf("Found %d unhealthy instances.", len(unhealthy))
return unhealthy, nil
}
Example 6: Initialize
func Initialize() (*NodeHealer, error) {
if HealerInstance != nil {
return nil, errors.New("healer alread initialized")
}
autoHealingNodes, err := config.GetBool("docker:healing:heal-nodes")
if err != nil {
autoHealingNodes = true
}
if !autoHealingNodes {
return nil, nil
}
disabledSeconds, _ := config.GetInt("docker:healing:disabled-time")
if disabledSeconds <= 0 {
disabledSeconds = 30
}
maxFailures, _ := config.GetInt("docker:healing:max-failures")
if maxFailures <= 0 {
maxFailures = 5
}
waitSecondsNewMachine, _ := config.GetInt("docker:healing:wait-new-time")
if waitSecondsNewMachine <= 0 {
waitSecondsNewMachine = 5 * 60
}
HealerInstance = newNodeHealer(nodeHealerArgs{
DisabledTime: time.Duration(disabledSeconds) * time.Second,
WaitTimeNewMachine: time.Duration(waitSecondsNewMachine) * time.Second,
FailuresBeforeHealing: maxFailures,
})
shutdown.Register(HealerInstance)
return HealerInstance, nil
}
Example 7: CreateApp
// CreateApp creates a new app.
//
// Creating a new app is a process composed of five steps:
//
// 1. Save the app in the database
// 2. Create IAM credentials for the app
// 3. Create S3 bucket for the app (if the bucket support is enabled)
// 4. Create the git repository using gandalf
// 5. Provision units within the provisioner
func CreateApp(app *App, user *auth.User) error {
teams, err := user.Teams()
if err != nil {
return err
}
if len(teams) == 0 {
return NoTeamsError{}
}
if _, err := getPlatform(app.Platform); err != nil {
return err
}
app.SetTeams(teams)
app.Owner = user.Email
if !app.isValid() {
msg := "Invalid app name, your app should have at most 63 " +
"characters, containing only lower case letters, numbers or dashes, " +
"starting with a letter."
return &errors.ValidationError{Message: msg}
}
actions := []*action.Action{&reserveUserApp, &insertApp}
useS3, _ := config.GetBool("bucket-support")
if useS3 {
actions = append(actions, &createIAMUserAction,
&createIAMAccessKeyAction,
&createBucketAction, &createUserPolicyAction)
}
actions = append(actions, &exportEnvironmentsAction,
&createRepository, &provisionApp)
pipeline := action.NewPipeline(actions...)
err = pipeline.Execute(app, user)
if err != nil {
return &AppCreationError{app: app.Name, Err: err}
}
return nil
}
Example 8: AddBackend
func (r elbRouter) AddBackend(name string) error {
var err error
options := elb.CreateLoadBalancer{
Name: name,
Listeners: []elb.Listener{
{
InstancePort: 80,
InstanceProtocol: "HTTP",
LoadBalancerPort: 80,
Protocol: "HTTP",
},
},
}
vpc, _ := config.GetBool("juju:elb-use-vpc")
if vpc {
options.Subnets, err = config.GetList("juju:elb-vpc-subnets")
if err != nil {
return err
}
options.SecurityGroups, err = config.GetList("juju:elb-vpc-secgroups")
if err != nil {
return err
}
options.Scheme = "internal"
} else {
options.AvailZones, err = config.GetList("juju:elb-avail-zones")
if err != nil {
return err
}
}
_, err = r.elb().CreateLoadBalancer(&options)
if err != nil {
return err
}
return router.Store(name, name)
}
Example 9: start
func (c *container) start() error {
port, err := getPort()
if err != nil {
return err
}
sharedBasedir, _ := config.GetString("docker:sharedfs:hostdir")
sharedMount, _ := config.GetString("docker:sharedfs:mountpoint")
sharedIsolation, _ := config.GetBool("docker:sharedfs:app-isolation")
sharedSalt, _ := config.GetString("docker:sharedfs:salt")
config := docker.HostConfig{}
config.PortBindings = map[docker.Port][]docker.PortBinding{
docker.Port(port + "/tcp"): {{HostIp: "", HostPort: ""}},
docker.Port("22/tcp"): {{HostIp: "", HostPort: ""}},
}
if sharedBasedir != "" && sharedMount != "" {
if sharedIsolation {
var appHostDir string
if sharedSalt != "" {
h := crypto.SHA1.New()
io.WriteString(h, sharedSalt+c.AppName)
appHostDir = fmt.Sprintf("%x", h.Sum(nil))
} else {
appHostDir = c.AppName
}
config.Binds = append(config.Binds, fmt.Sprintf("%s/%s:%s:rw", sharedBasedir, appHostDir, sharedMount))
} else {
config.Binds = append(config.Binds, fmt.Sprintf("%s:%s:rw", sharedBasedir, sharedMount))
}
}
err = dockerCluster().StartContainer(c.ID, &config)
if err != nil {
return err
}
return nil
}
Example 10: filterByMemoryUsage
func (s *segregatedScheduler) filterByMemoryUsage(a *app.App, nodes []cluster.Node, maxMemoryRatio float32, TotalMemoryMetadata string) ([]cluster.Node, error) {
if maxMemoryRatio == 0 || TotalMemoryMetadata == "" {
return nodes, nil
}
hosts := make([]string, len(nodes))
for i := range nodes {
hosts[i] = urlToHost(nodes[i].Address)
}
containers, err := s.provisioner.ListContainers(bson.M{"hostaddr": bson.M{"$in": hosts}, "id": bson.M{"$nin": s.ignoredContainers}})
if err != nil {
return nil, err
}
hostReserved := make(map[string]int64)
for _, cont := range containers {
a, err := app.GetByName(cont.AppName)
if err != nil {
return nil, err
}
hostReserved[cont.HostAddr] += a.Plan.Memory
}
megabyte := float64(1024 * 1024)
nodeList := make([]cluster.Node, 0, len(nodes))
for _, node := range nodes {
totalMemory, _ := strconv.ParseFloat(node.Metadata[TotalMemoryMetadata], 64)
shouldAdd := true
if totalMemory != 0 {
maxMemory := totalMemory * float64(maxMemoryRatio)
host := urlToHost(node.Address)
nodeReserved := hostReserved[host] + a.Plan.Memory
if nodeReserved > int64(maxMemory) {
shouldAdd = false
tryingToReserveMB := float64(a.Plan.Memory) / megabyte
reservedMB := float64(hostReserved[host]) / megabyte
limitMB := maxMemory / megabyte
log.Errorf("Node %q has reached its memory limit. "+
"Limit %0.4fMB. Reserved: %0.4fMB. Needed additional %0.4fMB",
host, limitMB, reservedMB, tryingToReserveMB)
}
}
if shouldAdd {
nodeList = append(nodeList, node)
}
}
if len(nodeList) == 0 {
autoScaleEnabled, _ := config.GetBool("docker:auto-scale:enabled")
errMsg := fmt.Sprintf("no nodes found with enough memory for container of %q: %0.4fMB",
a.Name, float64(a.Plan.Memory)/megabyte)
if autoScaleEnabled {
// Allow going over quota temporarily because auto-scale will be
// able to detect this and automatically add new nodes.
log.Errorf("WARNING: %s. Will ignore memory restrictions.", errMsg)
return nodes, nil
}
return nil, errors.New(errMsg)
}
return nodeList, nil
}
Example 11: Init
func Init() {
var loggers []Logger
debug, _ := config.GetBool("debug")
if logFileName, err := config.GetString("log:file"); err == nil {
loggers = append(loggers, NewFileLogger(logFileName, debug))
} else if err == config.ErrMismatchConf {
panic(fmt.Sprintf("%s please see http://docs.tsuru.io/en/latest/reference/config.html#log-file", err))
}
if disableSyslog, _ := config.GetBool("log:disable-syslog"); !disableSyslog {
tag, _ := config.GetString("log:syslog-tag")
if tag == "" {
tag = "tsurud"
}
loggers = append(loggers, NewSyslogLogger(tag, debug))
}
if useStderr, _ := config.GetBool("log:use-stderr"); useStderr {
loggers = append(loggers, NewWriterLogger(os.Stderr, debug))
}
SetLogger(NewMultiLogger(loggers...))
}
Example 12: getHostAddr
func getHostAddr(hostID string) string {
var fullAddress string
if seg, _ := config.GetBool("docker:segregate"); seg {
node, _ := segScheduler.GetNode(hostID)
fullAddress = node.Address
} else {
fullAddress = clusterNodes[hostID]
}
url, _ := url.Parse(fullAddress)
host, _, _ := net.SplitHostPort(url.Host)
return host
}
Example 13: readConfig
func readConfig(path string) (Config, error) {
cfg := Config{}
configFile := filepath.Join(path, "config.yaml")
err := config.ReadConfigFile(configFile)
if err != nil {
return cfg, err
}
cfg.Id, err = config.GetString("id")
if err != nil {
return cfg, err
}
cfg.Hostname, err = config.GetString("hostname")
if err != nil {
return cfg, err
}
cfg.DiskPath = filepath.Join(path, "disk.qcow")
cfg.Disk, err = config.GetInt("disk")
if err != nil {
return cfg, err
}
cfg.Cpu, err = config.GetInt("cpu")
if err != nil {
return cfg, err
}
cfg.Memory, err = config.GetInt("memory")
if err != nil {
return cfg, err
}
cfg.DNS, err = config.GetString("dns")
if err != nil {
return cfg, err
}
cfg.Docker, err = config.GetString("docker")
if err != nil {
return cfg, err
}
cfg.Extra, err = config.GetString("extra")
if err != nil {
return cfg, err
}
cfg.Route, err = config.GetBool("route")
return cfg, err
}
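For context, readConfig above expects a config.yaml with one entry per field it reads. A hypothetical file (all values invented for illustration) and a minimal driver that exercises it with only the getters already shown in this article might look like this:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/tsuru/config"
)

// sampleYAML is a hypothetical config.yaml matching the keys read by
// readConfig; every value is invented for illustration.
const sampleYAML = `id: vm-01
hostname: vm-01.example.com
disk: 20
cpu: 2
memory: 2048
dns: 8.8.8.8
docker: unix:///var/run/docker.sock
extra: ""
route: true
`

func main() {
	dir, err := os.MkdirTemp("", "cfg-example")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	path := filepath.Join(dir, "config.yaml")
	if err := os.WriteFile(path, []byte(sampleYAML), 0o644); err != nil {
		panic(err)
	}
	if err := config.ReadConfigFile(path); err != nil {
		panic(err)
	}
	// GetBool returns (false, error) when "route" is absent or not a boolean.
	route, err := config.GetBool("route")
	fmt.Println(route, err) // true <nil>
}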
Example 14: start
func (c *container) start(p *dockerProvisioner, app provision.App, isDeploy bool) error {
port, err := getPort()
if err != nil {
return err
}
sharedBasedir, _ := config.GetString("docker:sharedfs:hostdir")
sharedMount, _ := config.GetString("docker:sharedfs:mountpoint")
sharedIsolation, _ := config.GetBool("docker:sharedfs:app-isolation")
sharedSalt, _ := config.GetString("docker:sharedfs:salt")
hostConfig := docker.HostConfig{
Memory: app.GetMemory(),
MemorySwap: app.GetMemory() + app.GetSwap(),
CPUShares: int64(app.GetCpuShare()),
}
if !isDeploy {
hostConfig.RestartPolicy = docker.AlwaysRestart()
hostConfig.PortBindings = map[docker.Port][]docker.PortBinding{
docker.Port(port + "/tcp"): {{HostIP: "", HostPort: ""}},
}
hostConfig.LogConfig = docker.LogConfig{
Type: "syslog",
Config: map[string]string{
"syslog-address": fmt.Sprintf("udp://localhost:%d", getBsSysLogPort()),
},
}
}
hostConfig.SecurityOpt, _ = config.GetList("docker:security-opts")
if sharedBasedir != "" && sharedMount != "" {
if sharedIsolation {
var appHostDir string
if sharedSalt != "" {
h := crypto.SHA1.New()
io.WriteString(h, sharedSalt+c.AppName)
appHostDir = fmt.Sprintf("%x", h.Sum(nil))
} else {
appHostDir = c.AppName
}
hostConfig.Binds = append(hostConfig.Binds, fmt.Sprintf("%s/%s:%s:rw", sharedBasedir, appHostDir, sharedMount))
} else {
hostConfig.Binds = append(hostConfig.Binds, fmt.Sprintf("%s:%s:rw", sharedBasedir, sharedMount))
}
}
err = p.getCluster().StartContainer(c.ID, &hostConfig)
if err != nil {
return err
}
initialStatus := provision.StatusStarting.String()
if isDeploy {
initialStatus = provision.StatusBuilding.String()
}
return c.setStatus(p, initialStatus, false)
}
Example 15: legacyAutoScaleRule
func legacyAutoScaleRule() *autoScaleRule {
metadataFilter, _ := config.GetString("docker:auto-scale:metadata-filter")
maxContainerCount, _ := config.GetInt("docker:auto-scale:max-container-count")
scaleDownRatio, _ := config.GetFloat("docker:auto-scale:scale-down-ratio")
preventRebalance, _ := config.GetBool("docker:auto-scale:prevent-rebalance")
return &autoScaleRule{
MaxContainerCount: maxContainerCount,
MetadataFilter: metadataFilter,
ScaleDownRatio: float32(scaleDownRatio),
PreventRebalance: preventRebalance,
Enabled: true,
}
}