本文整理汇总了Golang中github.com/outbrain/golib/log.Infof函数的典型用法代码示例。如果您正苦于以下问题:Golang Infof函数的具体用法?Golang Infof怎么用?Golang Infof使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了Infof函数的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: GetCandidateSiblingOfIntermediateMaster
// GetCandidateSiblingOfIntermediateMaster chooses the best sibling of a dead intermediate master
// to whom the IM's slaves can be moved.
// Candidates are tried in decreasing order of preference:
//   Plan A: an "is_candidate" sibling in the same data center & physical environment
//   Plan B: any valid sibling in the same data center & physical environment
//   Plan C: any "is_candidate" sibling
//   Plan D: any valid sibling at all
func GetCandidateSiblingOfIntermediateMaster(intermediateMasterKey *inst.InstanceKey) (*inst.Instance, error) {
	intermediateMasterInstance, _, err := inst.ReadInstance(intermediateMasterKey)
	if err != nil {
		return nil, err
	}
	siblings, err := inst.ReadSlaveInstances(&intermediateMasterInstance.MasterKey)
	if err != nil {
		return nil, err
	}
	// The intermediate master itself is among its master's slaves; <= 1 means no true sibling exists.
	if len(siblings) <= 1 {
		return nil, log.Errorf("topology_recovery: no siblings found for %+v", *intermediateMasterKey)
	}
	// Prefer siblings that already serve many slaves: they come first after the reverse sort.
	sort.Sort(sort.Reverse(InstancesByCountSlaves(siblings)))

	// sameDCAndEnv reports whether sibling is co-located with the dead intermediate master.
	sameDCAndEnv := func(sibling *inst.Instance) bool {
		return sibling.DataCenter == intermediateMasterInstance.DataCenter &&
			sibling.PhysicalEnvironment == intermediateMasterInstance.PhysicalEnvironment
	}
	// firstValidSibling returns the first sibling that passes the general validity check
	// and the given extraFilter, logging foundFormat (a format string taking the sibling's
	// key) on success. Returns nil when no sibling qualifies.
	firstValidSibling := func(extraFilter func(*inst.Instance) bool, foundFormat string) *inst.Instance {
		for _, sibling := range siblings {
			if isValidAsCandidateSiblingOfIntermediateMaster(intermediateMasterInstance, sibling) && extraFilter(sibling) {
				log.Infof(foundFormat, sibling.Key)
				return sibling
			}
		}
		return nil
	}

	// In the next series of steps we attempt to return a good replacement.
	// None of the below attempts is sure to pick a winning server. Perhaps picked server is not enough up-todate -- but
	// this has small likelihood in the general case, and, well, it's an attempt. It's a Plan A, but we have Plan B & C if this fails.
	log.Infof("topology_recovery: searching for the best candidate sibling of dead intermediate master")
	// Plan A: an "is_candidate" server in same dc & env
	if sibling := firstValidSibling(func(s *inst.Instance) bool { return s.IsCandidate && sameDCAndEnv(s) }, "topology_recovery: found %+v as the ideal candidate"); sibling != nil {
		return sibling, nil
	}
	// Plan B: something else in the same DC & ENV
	if sibling := firstValidSibling(sameDCAndEnv, "topology_recovery: found %+v as a replacement in same dc & environment"); sibling != nil {
		return sibling, nil
	}
	// Plan C: nothing in same DC & env; just go for some is_candidate
	if sibling := firstValidSibling(func(s *inst.Instance) bool { return s.IsCandidate }, "topology_recovery: found %+v as a good candidate"); sibling != nil {
		return sibling, nil
	}
	// Plan D: haven't found an "is_candidate"; just whatever is valid
	if sibling := firstValidSibling(func(s *inst.Instance) bool { return true }, "topology_recovery: found %+v as a replacement"); sibling != nil {
		return sibling, nil
	}
	return nil, log.Errorf("topology_recovery: cannot find candidate sibling of %+v", *intermediateMasterKey)
}
示例2: Http
// Http starts serving HTTP (api/web) requests
func Http() {
	martini.Env = martini.Prod
	m := martini.Classic()
	if config.Config.HTTPAuthUser != "" {
		m.Use(auth.Basic(config.Config.HTTPAuthUser, config.Config.HTTPAuthPassword))
	}
	m.Use(gzip.All())
	// Render html templates from templates directory
	m.Use(render.Renderer(render.Options{
		Directory:       "resources",
		Layout:          "templates/layout",
		HTMLContentType: "text/html",
	}))
	m.Use(martini.Static("resources/public"))
	// Background agent work runs alongside the web server for the process lifetime.
	go agent.ContinuousOperation()
	log.Infof("Starting HTTP on port %d", config.Config.HTTPPort)
	http.API.RegisterRequests(m)
	// Serve
	if config.Config.UseSSL {
		log.Info("Serving via SSL")
		err := nethttp.ListenAndServeTLS(fmt.Sprintf(":%d", config.Config.HTTPPort), config.Config.SSLCertFile, config.Config.SSLPrivateKeyFile, m)
		if err != nil {
			log.Fatale(err)
		}
	} else {
		// Bug fix: the returned error was previously discarded, so a failed bind
		// (e.g. port already in use) left the process running without a listener.
		// Fail loudly, consistent with the TLS branch above.
		if err := nethttp.ListenAndServe(fmt.Sprintf(":%d", config.Config.HTTPPort), m); err != nil {
			log.Fatale(err)
		}
	}
}
示例3: ReattachSlave
// ReattachSlave restores a detached slave back into replication
func ReattachSlave(instanceKey *InstanceKey) (*Instance, error) {
	instance, err := ReadTopologyInstance(instanceKey)
	if err != nil {
		return instance, log.Errore(err)
	}
	// A running slave needs no reattaching.
	if instance.SlaveRunning() {
		return instance, fmt.Errorf("Cannot (need not) reattach slave on: %+v because slave is running", instanceKey)
	}
	detached, logFile, logPos := instance.ExecBinlogCoordinates.DetachedCoordinates()
	if !detached {
		return instance, fmt.Errorf("Cannot reattach slave on: %+v because slave is not detached", instanceKey)
	}
	if *config.RuntimeCLIFlags.Noop {
		return instance, fmt.Errorf("noop: aborting reattach-slave operation on %+v; signalling error but nothing went wrong.", *instanceKey)
	}
	// Restore the genuine coordinates that DetachSlave encoded into the bogus log file name.
	reattachQuery := fmt.Sprintf(`change master to master_log_file='%s', master_log_pos=%s`, logFile, logPos)
	if _, err = ExecInstanceNoPrepare(instanceKey, reattachQuery); err != nil {
		return instance, log.Errore(err)
	}
	log.Infof("Reattach slave %+v", instanceKey)
	return ReadTopologyInstance(instanceKey)
}
示例4: DetachSlave
// DetachSlave detaches a slave from replication; forcibly corrupting the binlog coordinates (though in such way
// that is reversible)
func DetachSlave(instanceKey *InstanceKey) (*Instance, error) {
	instance, err := ReadTopologyInstance(instanceKey)
	if err != nil {
		return instance, log.Errore(err)
	}
	// Refuse to touch coordinates while replication is active.
	if instance.SlaveRunning() {
		return instance, fmt.Errorf("Cannot detach slave on: %+v because slave is running", instanceKey)
	}
	if alreadyDetached, _, _ := instance.ExecBinlogCoordinates.DetachedCoordinates(); alreadyDetached {
		return instance, fmt.Errorf("Cannot (need not) detach slave on: %+v because slave is already detached", instanceKey)
	}
	if *config.RuntimeCLIFlags.Noop {
		return instance, fmt.Errorf("noop: aborting detach-slave operation on %+v; signalling error but nothing went wrong.", *instanceKey)
	}
	// Encode the current coordinates within the log file name, in such way that replication is broken, but info can still be resurrected
	bogusLogFile := fmt.Sprintf("//%s:%d", instance.ExecBinlogCoordinates.LogFile, instance.ExecBinlogCoordinates.LogPos)
	detachQuery := fmt.Sprintf(`change master to master_log_file='%s', master_log_pos=%d`, bogusLogFile, instance.ExecBinlogCoordinates.LogPos)
	if _, err = ExecInstanceNoPrepare(instanceKey, detachQuery); err != nil {
		return instance, log.Errore(err)
	}
	log.Infof("Detach slave %+v", instanceKey)
	return ReadTopologyInstance(instanceKey)
}
示例5: ChangeMasterCredentials
// ChangeMasterCredentials issues a CHANGE MASTER TO... MASTER_USER=, MASTER_PASSWORD=...
func ChangeMasterCredentials(instanceKey *InstanceKey, masterUser string, masterPassword string) (*Instance, error) {
	instance, err := ReadTopologyInstance(instanceKey)
	if err != nil {
		return instance, log.Errore(err)
	}
	// An empty user would wipe the replication credentials; reject it outright.
	if masterUser == "" {
		return instance, log.Errorf("Empty user in ChangeMasterCredentials() for %+v", *instanceKey)
	}
	// CHANGE MASTER TO requires replication to be stopped.
	if instance.SlaveRunning() {
		return instance, fmt.Errorf("ChangeMasterTo: Cannot change master on: %+v because slave is running", *instanceKey)
	}
	log.Debugf("ChangeMasterTo: will attempt changing master credentials on %+v", *instanceKey)
	if *config.RuntimeCLIFlags.Noop {
		return instance, fmt.Errorf("noop: aborting CHANGE MASTER TO operation on %+v; signalling error but nothing went wrong.", *instanceKey)
	}
	credentialsQuery := fmt.Sprintf("change master to master_user='%s', master_password='%s'",
		masterUser, masterPassword)
	if _, err = ExecInstanceNoPrepare(instanceKey, credentialsQuery); err != nil {
		return instance, log.Errore(err)
	}
	log.Infof("ChangeMasterTo: Changed master credentials on %+v", *instanceKey)
	return ReadTopologyInstance(instanceKey)
}
示例6: ResetSlave
// ResetSlave resets a slave, breaking the replication
func ResetSlave(instanceKey *InstanceKey) (*Instance, error) {
	instance, err := ReadTopologyInstance(instanceKey)
	if err != nil {
		return instance, log.Errore(err)
	}
	// Only a stopped slave may be reset.
	if instance.SlaveRunning() {
		return instance, fmt.Errorf("Cannot reset slave on: %+v because slave is running", instanceKey)
	}
	if *config.RuntimeCLIFlags.Noop {
		return instance, fmt.Errorf("noop: aborting reset-slave operation on %+v; signalling error but nothing went wrong.", *instanceKey)
	}
	// MySQL's RESET SLAVE is done correctly; however SHOW SLAVE STATUS still returns old hostnames etc
	// and only resets till after next restart. This leads to orchestrator still thinking the instance replicates
	// from old host. We therefore forcibly modify the hostname.
	// RESET SLAVE ALL command solves this, but only as of 5.6.3
	resetQueries := []string{
		`change master to master_host='_'`,
		`reset slave /*!50603 all */`,
	}
	for _, query := range resetQueries {
		if _, err = ExecInstanceNoPrepare(instanceKey, query); err != nil {
			return instance, log.Errore(err)
		}
	}
	log.Infof("Reset slave %+v", instanceKey)
	return ReadTopologyInstance(instanceKey)
}
示例7: StopSlave
// StopSlave stops replication on a given instance
func StopSlave(instanceKey *InstanceKey) (*Instance, error) {
	instance, err := ReadTopologyInstance(instanceKey)
	if err != nil {
		return instance, log.Errore(err)
	}
	if !instance.IsSlave() {
		return instance, fmt.Errorf("instance is not a slave: %+v", instanceKey)
	}
	_, err = ExecInstanceNoPrepare(instanceKey, `stop slave`)
	if err != nil {
		// Patch; current MaxScale behavior for STOP SLAVE is to throw an error if slave already stopped.
		if instance.isMaxScale() && err.Error() == "Error 1199: Slave connection is not running" {
			err = nil
		}
	}
	if err != nil {
		return instance, log.Errore(err)
	}
	instance, err = ReadTopologyInstance(instanceKey)
	if err != nil {
		// Bug fix: previously the log line below dereferenced `instance` even when this
		// re-read failed (in which case `instance` may be nil), risking a nil-pointer panic.
		return instance, log.Errore(err)
	}
	log.Infof("Stopped slave on %+v, Self:%+v, Exec:%+v", *instanceKey, instance.SelfBinlogCoordinates, instance.ExecBinlogCoordinates)
	return instance, err
}
示例8: EnableSemiSync
// EnableSemiSync sets the rpl_semi_sync_(master|slave)_enabled variables
// on a given instance.
func EnableSemiSync(instanceKey *InstanceKey, master, slave bool) error {
	log.Infof("instance %+v rpl_semi_sync_master_enabled: %t, rpl_semi_sync_slave_enabled: %t", instanceKey, master, slave)
	// Both variables are set in a single statement so the instance never observes
	// an intermediate combination.
	query := `set global rpl_semi_sync_master_enabled = ?, global rpl_semi_sync_slave_enabled = ?`
	_, err := ExecInstanceNoPrepare(instanceKey, query, master, slave)
	return err
}
示例9: StartSlaveUntilMasterCoordinates
// StartSlaveUntilMasterCoordinates issues a START SLAVE UNTIL... statement on given instance,
// then polls the instance until its SQL thread reaches exactly the given coordinates,
// and finally stops the slave. Returns the freshly re-read instance.
func StartSlaveUntilMasterCoordinates(instanceKey *InstanceKey, masterCoordinates *BinlogCoordinates) (*Instance, error) {
	instance, err := ReadTopologyInstance(instanceKey)
	if err != nil {
		return instance, log.Errore(err)
	}
	if !instance.IsSlave() {
		return instance, fmt.Errorf("instance is not a slave: %+v", instanceKey)
	}
	// START SLAVE UNTIL only makes sense on a stopped slave.
	if instance.SlaveRunning() {
		return instance, fmt.Errorf("slave already running: %+v", instanceKey)
	}
	log.Infof("Will start slave on %+v until coordinates: %+v", instanceKey, masterCoordinates)
	if instance.SemiSyncEnforced {
		// Send ACK only from promotable instances.
		sendACK := instance.PromotionRule != MustNotPromoteRule
		// Always disable master setting, in case we're converting a former master.
		if err := EnableSemiSync(instanceKey, false, sendACK); err != nil {
			return instance, log.Errore(err)
		}
	}
	// MariaDB has a bug: a CHANGE MASTER TO statement does not work properly with prepared statement... :P
	// See https://mariadb.atlassian.net/browse/MDEV-7640
	// This is the reason for ExecInstanceNoPrepare
	_, err = ExecInstanceNoPrepare(instanceKey, fmt.Sprintf("start slave until master_log_file='%s', master_log_pos=%d",
		masterCoordinates.LogFile, masterCoordinates.LogPos))
	if err != nil {
		return instance, log.Errore(err)
	}
	// Poll execution coordinates until the slave has applied everything up to the
	// requested position. UNTIL stops the SQL thread at >= coordinates, so landing
	// past them indicates the exact position was never a valid stopping point.
	for upToDate := false; !upToDate; {
		instance, err = ReadTopologyInstance(instanceKey)
		if err != nil {
			return instance, log.Errore(err)
		}
		switch {
		case instance.ExecBinlogCoordinates.SmallerThan(masterCoordinates):
			// Not there yet; wait and re-poll.
			time.Sleep(sqlThreadPollDuration)
		case instance.ExecBinlogCoordinates.Equals(masterCoordinates):
			upToDate = true
		case masterCoordinates.SmallerThan(&instance.ExecBinlogCoordinates):
			// Overshot the target coordinates: treat as an error rather than guessing.
			return instance, fmt.Errorf("Start SLAVE UNTIL is past coordinates: %+v", instanceKey)
		}
	}
	// Coordinates reached; fully stop replication before returning.
	instance, err = StopSlave(instanceKey)
	if err != nil {
		return instance, log.Errore(err)
	}
	return instance, err
}
示例10: Http
// Http starts serving HTTP (api/web) requests
func Http() {
	martini.Env = martini.Prod
	m := martini.Classic()
	if config.Config.HTTPAuthUser != "" {
		m.Use(auth.Basic(config.Config.HTTPAuthUser, config.Config.HTTPAuthPassword))
	}
	m.Use(gzip.All())
	// Render html templates from templates directory
	m.Use(render.Renderer(render.Options{
		Directory:       "resources",
		Layout:          "templates/layout",
		HTMLContentType: "text/html",
	}))
	m.Use(martini.Static("resources/public"))
	if config.Config.UseMutualTLS {
		m.Use(ssl.VerifyOUs(config.Config.SSLValidOUs))
	}
	// Background agent work runs alongside the web server.
	go agent.ContinuousOperation()
	log.Infof("Starting HTTP on port %d", config.Config.HTTPPort)
	http.API.RegisterRequests(m)
	serveAddress := fmt.Sprintf(":%d", config.Config.HTTPPort)
	// Serve, over TLS when so configured
	if !config.Config.UseSSL {
		log.Info("Starting HTTP listener")
		if err := nethttp.ListenAndServe(serveAddress, m); err != nil {
			log.Fatale(err)
		}
	} else {
		// TLS requires both a certificate and a private key up front.
		if config.Config.SSLCertFile == "" {
			log.Fatale(errors.New("UseSSL is true but SSLCertFile is unspecified"))
		}
		if config.Config.SSLPrivateKeyFile == "" {
			log.Fatale(errors.New("UseSSL is true but SSLPrivateKeyFile is unspecified"))
		}
		log.Info("Starting HTTPS listener")
		tlsConfig, err := ssl.NewTLSConfig(config.Config.SSLCAFile, config.Config.UseMutualTLS)
		if err != nil {
			log.Fatale(err)
		}
		if err = ssl.AppendKeyPair(tlsConfig, config.Config.SSLCertFile, config.Config.SSLPrivateKeyFile); err != nil {
			log.Fatale(err)
		}
		if err = ssl.ListenAndServeTLS(serveAddress, m, tlsConfig); err != nil {
			log.Fatale(err)
		}
	}
	log.Info("Web server started")
}