This article collects typical usages of the Errorf function from the Golang package github.com/outbrain/golib/log. If you are unsure what Errorf does, how to call it, or what real-world usage looks like, the hand-picked code examples below may help.
A total of 15 code examples of the Errorf function are shown, sorted roughly by popularity.
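Before the individual examples, here is a minimal, self-contained sketch of the idiom that recurs throughout them: log.Errorf formats a message, writes it at error level, and also returns it as an error value, so a caller can log and propagate a failure in one statement. The helper function and message below are made up purely for illustration; only the log.Errorf call itself reflects the usage shown in the examples.

package main

import (
    "github.com/outbrain/golib/log"
)

// loadToken is a hypothetical helper, used only to demonstrate the
// "log and return" pattern seen in the examples that follow.
func loadToken(hostname string) (string, error) {
    if hostname == "" {
        // log.Errorf writes the formatted message to the error log
        // and returns it as an error, so it can be returned directly.
        return "", log.Errorf("loadToken: empty hostname given")
    }
    return hostname + "-token", nil
}

func main() {
    if _, err := loadToken(""); err != nil {
        // err already carries the formatted message logged above
        log.Errorf("could not load token: %+v", err)
    }
}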
Example 1: GetCandidateSiblingOfIntermediateMaster
// GetCandidateSiblingOfIntermediateMaster chooses the best sibling of a dead intermediate master
// to whom the IM's slaves can be moved.
func GetCandidateSiblingOfIntermediateMaster(intermediateMasterKey *inst.InstanceKey) (*inst.Instance, error) {
    intermediateMasterInstance, _, err := inst.ReadInstance(intermediateMasterKey)
    if err != nil {
        return nil, err
    }
    siblings, err := inst.ReadSlaveInstances(&intermediateMasterInstance.MasterKey)
    if err != nil {
        return nil, err
    }
    if len(siblings) <= 1 {
        return nil, log.Errorf("topology_recovery: no siblings found for %+v", *intermediateMasterKey)
    }
    sort.Sort(sort.Reverse(InstancesByCountSlaves(siblings)))
    // In the next series of steps we attempt to return a good replacement.
    // None of the attempts below is guaranteed to pick a winning server. Perhaps the picked server is not up-to-date
    // enough -- but that is unlikely in the general case, and, well, it's an attempt. It's Plan A, but we have Plans B & C if this fails.
    // First, we try to return an "is_candidate" server in the same DC & env
    log.Infof("topology_recovery: searching for the best candidate sibling of dead intermediate master")
    for _, sibling := range siblings {
        sibling := sibling
        if isValidAsCandidateSiblingOfIntermediateMaster(intermediateMasterInstance, sibling) &&
            sibling.IsCandidate &&
            sibling.DataCenter == intermediateMasterInstance.DataCenter &&
            sibling.PhysicalEnvironment == intermediateMasterInstance.PhysicalEnvironment {
            log.Infof("topology_recovery: found %+v as the ideal candidate", sibling.Key)
            return sibling, nil
        }
    }
    // Go for something else in the same DC & env
    for _, sibling := range siblings {
        sibling := sibling
        if isValidAsCandidateSiblingOfIntermediateMaster(intermediateMasterInstance, sibling) &&
            sibling.DataCenter == intermediateMasterInstance.DataCenter &&
            sibling.PhysicalEnvironment == intermediateMasterInstance.PhysicalEnvironment {
            log.Infof("topology_recovery: found %+v as a replacement in same dc & environment", sibling.Key)
            return sibling, nil
        }
    }
    // Nothing in same DC & env, let's just go for some is_candidate
    for _, sibling := range siblings {
        sibling := sibling
        if isValidAsCandidateSiblingOfIntermediateMaster(intermediateMasterInstance, sibling) && sibling.IsCandidate {
            log.Infof("topology_recovery: found %+v as a good candidate", sibling.Key)
            return sibling, nil
        }
    }
    // Haven't found an "is_candidate". Just take whatever is valid.
    for _, sibling := range siblings {
        sibling := sibling
        if isValidAsCandidateSiblingOfIntermediateMaster(intermediateMasterInstance, sibling) {
            log.Infof("topology_recovery: found %+v as a replacement", sibling.Key)
            return sibling, nil
        }
    }
    return nil, log.Errorf("topology_recovery: cannot find candidate sibling of %+v", *intermediateMasterKey)
}
Example 2: readAgentBasicInfo
// readAgentBasicInfo returns the basic data for an agent directly from backend table (no agent access)
func readAgentBasicInfo(hostname string) (Agent, string, error) {
    agent := Agent{}
    token := ""
    query := `
        select
            hostname,
            port,
            token,
            last_submitted,
            mysql_port
        from
            host_agent
        where
            hostname = ?
        `
    err := db.QueryOrchestrator(query, sqlutils.Args(hostname), func(m sqlutils.RowMap) error {
        agent.Hostname = m.GetString("hostname")
        agent.Port = m.GetInt("port")
        agent.LastSubmitted = m.GetString("last_submitted")
        agent.MySQLPort = m.GetInt64("mysql_port")
        token = m.GetString("token")
        return nil
    })
    if err != nil {
        return agent, "", err
    }
    if token == "" {
        return agent, "", log.Errorf("Cannot get agent/token: %s", hostname)
    }
    return agent, token, nil
}
Example 3: readBinlogEventsChunk
// Read (as much as possible of) a chunk of binary log events, starting at the given startingCoordinates
func readBinlogEventsChunk(instanceKey *InstanceKey, startingCoordinates BinlogCoordinates) ([]BinlogEvent, error) {
    events := []BinlogEvent{}
    db, err := db.OpenTopology(instanceKey.Hostname, instanceKey.Port)
    if err != nil {
        return events, err
    }
    commandToken := math.TernaryString(startingCoordinates.Type == BinaryLog, "binlog", "relaylog")
    if startingCoordinates.LogFile == "" {
        return events, log.Errorf("readBinlogEventsChunk: empty binlog file name for %+v.", *instanceKey)
    }
    query := fmt.Sprintf("show %s events in '%s' FROM %d LIMIT %d", commandToken, startingCoordinates.LogFile, startingCoordinates.LogPos, config.Config.BinlogEventsChunkSize)
    err = sqlutils.QueryRowsMap(db, query, func(m sqlutils.RowMap) error {
        binlogEvent := BinlogEvent{}
        binlogEvent.Coordinates.LogFile = m.GetString("Log_name")
        binlogEvent.Coordinates.LogPos = m.GetInt64("Pos")
        binlogEvent.Coordinates.Type = startingCoordinates.Type
        binlogEvent.NextEventPos = m.GetInt64("End_log_pos")
        binlogEvent.EventType = m.GetString("Event_type")
        binlogEvent.Info = m.GetString("Info")
        events = append(events, binlogEvent)
        return nil
    })
    return events, err
}
Example 4: UnresolveHostname
func UnresolveHostname(instanceKey *InstanceKey) (InstanceKey, bool, error) {
    unresolvedHostname, err := readUnresolvedHostname(instanceKey.Hostname)
    if err != nil {
        return *instanceKey, false, log.Errore(err)
    }
    if unresolvedHostname == instanceKey.Hostname {
        // unchanged. Nothing to do
        return *instanceKey, false, nil
    }
    // We unresolved to a different hostname. We will now re-resolve to double-check!
    unresolvedKey := &InstanceKey{Hostname: unresolvedHostname, Port: instanceKey.Port}
    instance, err := ReadTopologyInstance(unresolvedKey)
    if err != nil {
        return *instanceKey, false, log.Errore(err)
    }
    if instance.Key.Hostname != instanceKey.Hostname {
        // Resolve(Unresolve(hostname)) != hostname ==> bad; reject
        if *config.RuntimeCLIFlags.SkipUnresolveCheck {
            return *instanceKey, false, nil
        }
        return *instanceKey, false, log.Errorf("Error unresolving; hostname=%s, unresolved=%s, re-resolved=%s; mismatch. Skip/ignore with --skip-unresolve-check", instanceKey.Hostname, unresolvedKey.Hostname, instance.Key.Hostname)
    }
    return *unresolvedKey, true, nil
}
Example 5: ChangeMasterCredentials
// ChangeMasterCredentials issues a CHANGE MASTER TO... MASTER_USER=, MASTER_PASSWORD=...
func ChangeMasterCredentials(instanceKey *InstanceKey, masterUser string, masterPassword string) (*Instance, error) {
    instance, err := ReadTopologyInstance(instanceKey)
    if err != nil {
        return instance, log.Errore(err)
    }
    if masterUser == "" {
        return instance, log.Errorf("Empty user in ChangeMasterCredentials() for %+v", *instanceKey)
    }
    if instance.SlaveRunning() {
        return instance, fmt.Errorf("ChangeMasterTo: Cannot change master on: %+v because slave is running", *instanceKey)
    }
    log.Debugf("ChangeMasterTo: will attempt changing master credentials on %+v", *instanceKey)
    if *config.RuntimeCLIFlags.Noop {
        return instance, fmt.Errorf("noop: aborting CHANGE MASTER TO operation on %+v; signalling error but nothing went wrong.", *instanceKey)
    }
    _, err = ExecInstanceNoPrepare(instanceKey, fmt.Sprintf("change master to master_user='%s', master_password='%s'",
        masterUser, masterPassword))
    if err != nil {
        return instance, log.Errore(err)
    }
    log.Infof("ChangeMasterTo: Changed master credentials on %+v", *instanceKey)
    instance, err = ReadTopologyInstance(instanceKey)
    return instance, err
}
Example 6: getLastPseudoGTIDEntryInRelayLogs
func getLastPseudoGTIDEntryInRelayLogs(instance *Instance, minBinlogCoordinates *BinlogCoordinates, recordedInstanceRelayLogCoordinates BinlogCoordinates, exhaustiveSearch bool) (*BinlogCoordinates, string, error) {
    // Look for the last Pseudo-GTID entry in the relay logs:
    // Since MySQL does not provide a SHOW RELAY LOGS command, we heuristically start from the current
    // relay log (indicated by Relay_log_file) and walk backwards.
    // Eventually we will hit a relay log name which does not exist.
    pseudoGTIDRegexp, err := compilePseudoGTIDPattern()
    if err != nil {
        return nil, "", err
    }
    currentRelayLog := recordedInstanceRelayLogCoordinates
    err = nil
    for err == nil {
        log.Debugf("Searching for latest pseudo gtid entry in relaylog %+v of %+v, up to pos %+v", currentRelayLog.LogFile, instance.Key, recordedInstanceRelayLogCoordinates)
        if resultCoordinates, entryInfo, err := getLastPseudoGTIDEntryInBinlog(pseudoGTIDRegexp, &instance.Key, currentRelayLog.LogFile, RelayLog, minBinlogCoordinates, &recordedInstanceRelayLogCoordinates); err != nil {
            return nil, "", err
        } else if resultCoordinates != nil {
            log.Debugf("Found pseudo gtid entry in %+v, %+v", instance.Key, resultCoordinates)
            return resultCoordinates, entryInfo, err
        }
        if !exhaustiveSearch {
            break
        }
        if minBinlogCoordinates != nil && minBinlogCoordinates.LogFile == currentRelayLog.LogFile {
            // We tried and failed with the minBinlogCoordinates hint. We no longer require it,
            // and continue with an exhaustive search.
            minBinlogCoordinates = nil
            log.Debugf("Heuristic relaylog search failed; continuing exhaustive search")
            // And we do NOT iterate to the previous log file: we scan the same log file again, with no heuristic
        } else {
            currentRelayLog, err = currentRelayLog.PreviousFileCoordinates()
        }
    }
    return nil, "", log.Errorf("Cannot find pseudo GTID entry in relay logs of %+v", instance.Key)
}
Example 7: SkipQuery
// SkipQuery skips a single query in a failed replication instance
func SkipQuery(instanceKey *InstanceKey) (*Instance, error) {
    instance, err := ReadTopologyInstance(instanceKey)
    if err != nil {
        return instance, log.Errore(err)
    }
    if !instance.IsSlave() {
        return instance, fmt.Errorf("instance is not a slave: %+v", instanceKey)
    }
    if instance.Slave_SQL_Running {
        return instance, fmt.Errorf("Slave SQL thread is running on %+v", instanceKey)
    }
    if instance.LastSQLError == "" {
        return instance, fmt.Errorf("No SQL error on %+v", instanceKey)
    }
    if *config.RuntimeCLIFlags.Noop {
        return instance, fmt.Errorf("noop: aborting skip-query operation on %+v; signalling error but nothing went wrong.", *instanceKey)
    }
    log.Debugf("Skipping one query on %+v", instanceKey)
    if instance.UsingOracleGTID {
        err = skipQueryOracleGtid(instance)
    } else if instance.UsingMariaDBGTID {
        return instance, log.Errorf("%+v is replicating with MariaDB GTID. To skip a query first disable GTID, then skip, then enable GTID again", *instanceKey)
    } else {
        err = skipQueryClassic(instance)
    }
    if err != nil {
        return instance, log.Errore(err)
    }
    AuditOperation("skip-query", instanceKey, "Skipped one query")
    return StartSlave(instanceKey)
}
Example 8: InitGraphiteMetrics
func InitGraphiteMetrics() error {
    if config.Config.GraphiteAddr == "" {
        return nil
    }
    if config.Config.GraphitePath == "" {
        return log.Errorf("No graphite path provided (see GraphitePath config variable). Will not log to graphite")
    }
    addr, err := net.ResolveTCPAddr("tcp", config.Config.GraphiteAddr)
    if err != nil {
        return log.Errore(err)
    }
    graphitePathHostname := process.ThisHostname
    if config.Config.GraphiteConvertHostnameDotsToUnderscores {
        graphitePathHostname = strings.Replace(graphitePathHostname, ".", "_", -1)
    }
    graphitePath := config.Config.GraphitePath
    graphitePath = strings.Replace(graphitePath, "{hostname}", graphitePathHostname, -1)
    log.Debugf("Will log to graphite on %+v, %+v", config.Config.GraphiteAddr, graphitePath)
    go func() {
        go graphite.Graphite(metrics.DefaultRegistry, 1*time.Minute, graphitePath, addr)
        for range graphiteCallbackTick {
            for _, f := range graphiteTickCallbacks {
                go f()
            }
        }
    }()
    return nil
}
Example 9: DiscoverAgentInstance
// If a mysql port is available, try to discover against it
func DiscoverAgentInstance(hostname string, port int) error {
    agent, err := GetAgent(hostname)
    if err != nil {
        log.Errorf("Couldn't get agent for %s: %v", hostname, err)
        return err
    }
    instanceKey := agent.GetInstance()
    instance, err := inst.ReadTopologyInstance(instanceKey)
    if err != nil {
        log.Errorf("Failed to read topology for %v", instanceKey)
        return err
    }
    log.Infof("Discovered Agent Instance: %v", instance.Key)
    return nil
}
Example 10: getLastPseudoGTIDEntryInBinlog
// Try and find the last position of a pseudo GTID query entry in the given binary log.
// Also return the full text of that entry.
// maxCoordinates is the position beyond which we should not read. This is relevant when reading relay logs; in particular,
// the last relay log. We must be careful not to scan for Pseudo-GTID entries past the position executed by the SQL thread.
// maxCoordinates == nil means no limit.
func getLastPseudoGTIDEntryInBinlog(instanceKey *InstanceKey, binlog string, binlogType BinlogType, maxCoordinates *BinlogCoordinates) (*BinlogCoordinates, string, error) {
    if binlog == "" {
        return nil, "", log.Errorf("getLastPseudoGTIDEntryInBinlog: empty binlog file name for %+v. maxCoordinates = %+v", *instanceKey, maxCoordinates)
    }
    binlogCoordinates := BinlogCoordinates{LogFile: binlog, LogPos: 0, Type: binlogType}
    db, err := db.OpenTopology(instanceKey.Hostname, instanceKey.Port)
    if err != nil {
        return nil, "", err
    }
    moreRowsExpected := true
    var nextPos int64 = 0
    step := 0
    entryText := ""
    for moreRowsExpected {
        query := ""
        if binlogCoordinates.Type == BinaryLog {
            query = fmt.Sprintf("show binlog events in '%s' FROM %d LIMIT %d", binlog, nextPos, config.Config.BinlogEventsChunkSize)
        } else {
            query = fmt.Sprintf("show relaylog events in '%s' LIMIT %d,%d", binlog, (step * config.Config.BinlogEventsChunkSize), config.Config.BinlogEventsChunkSize)
        }
        moreRowsExpected = false
        queryRowsFunc := sqlutils.QueryRowsMap
        if config.Config.BufferBinlogEvents {
            queryRowsFunc = sqlutils.QueryRowsMapBuffered
        }
        err = queryRowsFunc(db, query, func(m sqlutils.RowMap) error {
            moreRowsExpected = true
            nextPos = m.GetInt64("End_log_pos")
            binlogEntryInfo := m.GetString("Info")
            if matched, _ := regexp.MatchString(config.Config.PseudoGTIDPattern, binlogEntryInfo); matched {
                if maxCoordinates != nil && maxCoordinates.SmallerThan(&BinlogCoordinates{LogFile: binlog, LogPos: m.GetInt64("Pos")}) {
                    // past the limit
                    moreRowsExpected = false
                    return nil
                }
                binlogCoordinates.LogPos = m.GetInt64("Pos")
                entryText = binlogEntryInfo
                // Found a match. But we keep searching: we're interested in the LAST entry, and, alas,
                // we can only search in ASCENDING order...
            }
            return nil
        })
        if err != nil {
            return nil, "", err
        }
        step++
    }
    // Not found? Return nil. An error is reserved for SQL problems.
    if binlogCoordinates.LogPos == 0 {
        return nil, "", nil
    }
    return &binlogCoordinates, entryText, err
}
Example 11: RecoverDeadIntermediateMaster
func RecoverDeadIntermediateMaster(analysisEntry inst.ReplicationAnalysis) (actionTaken bool, successorInstance *inst.Instance, err error) {
    failedInstanceKey := &analysisEntry.AnalyzedInstanceKey
    if ok, err := AttemptRecoveryRegistration(&analysisEntry); !ok {
        log.Debugf("topology_recovery: found an active or recent recovery on %+v. Will not issue another RecoverDeadIntermediateMaster.", *failedInstanceKey)
        return false, nil, err
    }
    inst.AuditOperation("recover-dead-intermediate-master", failedInstanceKey, "problem found; will recover")
    log.Debugf("topology_recovery: RecoverDeadIntermediateMaster: will recover %+v", *failedInstanceKey)
    if err := executeProcesses(config.Config.PreFailoverProcesses, "PreFailoverProcesses", analysisEntry, nil, true); err != nil {
        return false, nil, err
    }
    // Plan A: find a replacement intermediate master
    if candidateSibling, err := GetCandidateSiblingOfIntermediateMaster(failedInstanceKey); err == nil {
        log.Debugf("topology_recovery: - RecoverDeadIntermediateMaster: will attempt a candidate intermediate master: %+v", candidateSibling.Key)
        // We have a candidate
        if matchedSlaves, candidateSibling, err, errs := inst.MultiMatchSlaves(failedInstanceKey, &candidateSibling.Key, ""); err == nil {
            ResolveRecovery(failedInstanceKey, &candidateSibling.Key)
            successorInstance = candidateSibling
            actionTaken = true
            log.Debugf("topology_recovery: - RecoverDeadIntermediateMaster: move to candidate intermediate master (%+v) went with %d errors", candidateSibling.Key, len(errs))
            inst.AuditOperation("recover-dead-intermediate-master", failedInstanceKey, fmt.Sprintf("Done. Matched %d slaves under candidate sibling: %+v; %d errors: %+v", len(matchedSlaves), candidateSibling.Key, len(errs), errs))
        } else {
            log.Debugf("topology_recovery: - RecoverDeadIntermediateMaster: move to candidate intermediate master (%+v) did not complete: %+v", candidateSibling.Key, err)
            inst.AuditOperation("recover-dead-intermediate-master", failedInstanceKey, fmt.Sprintf("Matched %d slaves under candidate sibling: %+v; %d errors: %+v", len(matchedSlaves), candidateSibling.Key, len(errs), errs))
        }
    }
    if !actionTaken {
        // Either no candidate or only a partial match of slaves. Regroup as Plan B
        inst.RegroupSlaves(failedInstanceKey, nil)
        // We don't care much whether the regroup succeeded or not. We prefer that it did, in which case we only need to match up
        // one slave, but the operation is still valid if the regroup partially/completely failed. We just promote anything
        // not regrouped.
        // So, match up all that's left: Plan C
        log.Debugf("topology_recovery: - RecoverDeadIntermediateMaster: will next attempt a match up from %+v", *failedInstanceKey)
        var errs []error
        var matchedSlaves [](*inst.Instance)
        matchedSlaves, successorInstance, err, errs = inst.MatchUpSlaves(failedInstanceKey, "")
        if len(matchedSlaves) == 0 {
            log.Errorf("topology_recovery: RecoverDeadIntermediateMaster failed to match up any slave from %+v", *failedInstanceKey)
            return false, successorInstance, err
        }
        ResolveRecovery(failedInstanceKey, &successorInstance.Key)
        actionTaken = true
        log.Debugf("topology_recovery: - RecoverDeadIntermediateMaster: matched up to %+v", successorInstance.Key)
        inst.AuditOperation("recover-dead-intermediate-master", failedInstanceKey, fmt.Sprintf("Done. Matched slaves under: %+v %d errors: %+v", successorInstance.Key, len(errs), errs))
    }
    return actionTaken, successorInstance, err
}
Example 12: GetHostAttribute
// GetHostAttribute expects to return a single attribute for a given hostname/attribute-name combination
// or error on empty result
func GetHostAttribute(hostname string, attributeName string) (string, error) {
    whereClause := `where hostname=? and attribute_name=?`
    attributes, err := getHostAttributesByClause(whereClause, sqlutils.Args(hostname, attributeName))
    if err != nil {
        return "", err
    }
    if len(attributes) == 0 {
        return "", log.Errorf("No attribute found for %+v, %+v", hostname, attributeName)
    }
    return attributes[0].AttributeValue, nil
}
Example 13: SearchPseudoGTIDEntryInInstance
// SearchPseudoGTIDEntryInInstance will search for a specific text entry within the binary logs of a given instance.
func SearchPseudoGTIDEntryInInstance(instance *Instance, entryText string, entriesMonotonic bool) (*BinlogCoordinates, error) {
    cacheKey := getInstancePseudoGTIDKey(instance, entryText)
    coords, found := instancePseudoGTIDEntryCache.Get(cacheKey)
    if found {
        // This is wonderful. We can skip the tedious GTID search in the binary log
        log.Debugf("Found instance Pseudo GTID entry coordinates in cache: %+v, %+v, %+v", instance.Key, entryText, coords)
        return coords.(*BinlogCoordinates), nil
    }
    // Look for the GTID entry in the given instance:
    log.Debugf("Searching for given pseudo gtid entry in %+v. entriesMonotonic=%+v", instance.Key, entriesMonotonic)
    currentBinlog := instance.SelfBinlogCoordinates
    var err error = nil
    for {
        log.Debugf("Searching for given pseudo gtid entry in binlog %+v of %+v", currentBinlog.LogFile, instance.Key)
        // One loop iteration per binary log. This might turn out to be a heavyweight operation. We wish to throttle the operation such that
        // the instance does not suffer. If it is a slave, we will only act as long as it's not lagging too much.
        if instance.SlaveRunning() {
            for {
                log.Debugf("%+v is a replicating slave. Verifying lag", instance.Key)
                instance, err = ReadTopologyInstance(&instance.Key)
                if err != nil {
                    break
                }
                if instance.HasReasonableMaintenanceReplicationLag() {
                    // good to go!
                    break
                }
                log.Debugf("lag is too high on %+v. Throttling the search for pseudo gtid entry", instance.Key)
                time.Sleep(time.Duration(config.Config.ReasonableMaintenanceReplicationLagSeconds) * time.Second)
            }
        }
        var resultCoordinates BinlogCoordinates
        var found bool = false
        resultCoordinates, found, err = SearchPseudoGTIDEntryInBinlog(&instance.Key, currentBinlog.LogFile, entryText, entriesMonotonic)
        if err != nil {
            break
        }
        if found {
            log.Debugf("Matched entry in %+v: %+v", instance.Key, resultCoordinates)
            instancePseudoGTIDEntryCache.Set(cacheKey, &resultCoordinates, 0)
            return &resultCoordinates, nil
        }
        // Got here? Not found. Keep looking
        currentBinlog, err = currentBinlog.PreviousFileCoordinates()
        if err != nil {
            break
        }
        log.Debugf("- Will move next to binlog %+v", currentBinlog.LogFile)
    }
    return nil, log.Errorf("Cannot match pseudo GTID entry in binlogs of %+v; err: %+v", instance.Key, err)
}
Example 14: FlushBinaryLogsTo
// FlushBinaryLogsTo attempts to 'FLUSH BINARY LOGS' until given binary log is reached
func FlushBinaryLogsTo(instanceKey *InstanceKey, logFile string) (*Instance, error) {
    instance, err := ReadTopologyInstance(instanceKey)
    if err != nil {
        return instance, log.Errore(err)
    }
    distance := instance.SelfBinlogCoordinates.FileNumberDistance(&BinlogCoordinates{LogFile: logFile})
    if distance < 0 {
        return nil, log.Errorf("FlushBinaryLogsTo: target log file %+v is smaller than current log file %+v", logFile, instance.SelfBinlogCoordinates.LogFile)
    }
    return FlushBinaryLogs(instanceKey, distance)
}
Example 15: FlushNontrivialResolveCacheToDatabase
func FlushNontrivialResolveCacheToDatabase() error {
    if strings.ToLower(config.Config.HostnameResolveMethod) == "none" {
        return log.Errorf("FlushNontrivialResolveCacheToDatabase() called, but HostnameResolveMethod is %+v", config.Config.HostnameResolveMethod)
    }
    items, _ := HostnameResolveCache()
    for hostname := range items {
        resolvedHostname, found := hostnameResolvesLightweightCache.Get(hostname)
        if found && (resolvedHostname.(string) != hostname) {
            WriteResolvedHostname(hostname, resolvedHostname.(string))
        }
    }
    return nil
}