This article collects typical usage examples of the Golang function ReadInstance from github.com/outbrain/orchestrator/go/inst. If you are unsure what ReadInstance does or how to call it, the curated examples below may help.
Fifteen code examples of the ReadInstance function are shown, sorted by popularity by default.
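Before the individual examples, here is a minimal standalone sketch of the call pattern they all share. It is not taken from the orchestrator sources: the hostname and port are placeholders, the standard library log package stands in for orchestrator's own logger, and the signature ReadInstance(*inst.InstanceKey) (*inst.Instance, bool, error) is inferred from the examples below, where the boolean reports whether the key is present in orchestrator's backend store (in contrast to ReadTopologyInstance, which polls the live MySQL server).

package main

import (
    "fmt"
    "log"

    "github.com/outbrain/orchestrator/go/inst"
)

func main() {
    // Hypothetical key; replace with a host/port that orchestrator has already discovered.
    // The sketch assumes orchestrator's configuration and backend database have been
    // initialized elsewhere, since ReadInstance reads from that backend, not from MySQL.
    instanceKey := inst.InstanceKey{Hostname: "db-master.example.com", Port: 3306}

    instance, found, err := inst.ReadInstance(&instanceKey)
    if err != nil {
        // Backend error (e.g. the orchestrator database is unreachable).
        log.Fatal(err)
    }
    if !found || instance == nil {
        // No error, but orchestrator simply does not know this instance yet.
        log.Fatalf("instance not found: %+v", instanceKey)
    }
    fmt.Println(instance.Key.DisplayString())
}

Most of the examples below make the same three-way distinction: err signals a backend failure, found reports "not discovered yet", and a nil check guards the returned instance itself.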
Example 1: TestForgetMaster
func (s *TestSuite) TestForgetMaster(c *C) {
    _, _ = inst.ReadTopologyInstance(&masterKey)
    _, found, _ := inst.ReadInstance(&masterKey)
    c.Assert(found, Equals, true)
    inst.ForgetInstance(&masterKey)
    _, found, _ = inst.ReadInstance(&masterKey)
    c.Assert(found, Equals, false)
}
Example 2: TestDiscover
func (s *TestSuite) TestDiscover(c *C) {
    var err error
    _, err = db.ExecOrchestrator("delete from database_instance where hostname = ? and port = ?", masterKey.Hostname, masterKey.Port)
    _, err = db.ExecOrchestrator("delete from database_instance where hostname = ? and port = ?", slave1Key.Hostname, slave1Key.Port)
    _, err = db.ExecOrchestrator("delete from database_instance where hostname = ? and port = ?", slave2Key.Hostname, slave2Key.Port)
    _, err = db.ExecOrchestrator("delete from database_instance where hostname = ? and port = ?", slave3Key.Hostname, slave3Key.Port)
    _, found, _ := inst.ReadInstance(&masterKey)
    c.Assert(found, Equals, false)
    _, _ = inst.ReadTopologyInstance(&slave1Key)
    logic.StartDiscovery(slave1Key)
    _, found, err = inst.ReadInstance(&slave1Key)
    c.Assert(found, Equals, true)
    c.Assert(err, IsNil)
}
Example 3: GetCandidateSiblingOfIntermediateMaster
// GetCandidateSiblingOfIntermediateMaster chooses the best sibling of a dead intermediate master
// to whom the IM's slaves can be moved.
func GetCandidateSiblingOfIntermediateMaster(intermediateMasterKey *inst.InstanceKey) (*inst.Instance, error) {
    intermediateMasterInstance, _, err := inst.ReadInstance(intermediateMasterKey)
    if err != nil {
        return nil, err
    }
    siblings, err := inst.ReadSlaveInstances(&intermediateMasterInstance.MasterKey)
    if err != nil {
        return nil, err
    }
    if len(siblings) <= 1 {
        return nil, log.Errorf("topology_recovery: no siblings found for %+v", *intermediateMasterKey)
    }
    sort.Sort(sort.Reverse(InstancesByCountSlaves(siblings)))
    // In the next series of steps we attempt to return a good replacement.
    // None of the below attempts is sure to pick a winning server. Perhaps the picked server is not up-to-date enough -- but
    // this has small likelihood in the general case, and, well, it's an attempt. It's Plan A, but we have Plans B & C if this fails.
    // At first, we try to return an "is_candidate" server in the same DC & env
    log.Infof("topology_recovery: searching for the best candidate sibling of dead intermediate master")
    for _, sibling := range siblings {
        sibling := sibling
        if isValidAsCandidateSiblingOfIntermediateMaster(intermediateMasterInstance, sibling) &&
            sibling.IsCandidate &&
            sibling.DataCenter == intermediateMasterInstance.DataCenter &&
            sibling.PhysicalEnvironment == intermediateMasterInstance.PhysicalEnvironment {
            log.Infof("topology_recovery: found %+v as the ideal candidate", sibling.Key)
            return sibling, nil
        }
    }
    // Go for something else in the same DC & ENV
    for _, sibling := range siblings {
        sibling := sibling
        if isValidAsCandidateSiblingOfIntermediateMaster(intermediateMasterInstance, sibling) &&
            sibling.DataCenter == intermediateMasterInstance.DataCenter &&
            sibling.PhysicalEnvironment == intermediateMasterInstance.PhysicalEnvironment {
            log.Infof("topology_recovery: found %+v as a replacement in same dc & environment", sibling.Key)
            return sibling, nil
        }
    }
    // Nothing in same DC & env, let's just go for some is_candidate
    for _, sibling := range siblings {
        sibling := sibling
        if isValidAsCandidateSiblingOfIntermediateMaster(intermediateMasterInstance, sibling) && sibling.IsCandidate {
            log.Infof("topology_recovery: found %+v as a good candidate", sibling.Key)
            return sibling, nil
        }
    }
    // Haven't found an "is_candidate". Just whatever is valid.
    for _, sibling := range siblings {
        sibling := sibling
        if isValidAsCandidateSiblingOfIntermediateMaster(intermediateMasterInstance, sibling) {
            log.Infof("topology_recovery: found %+v as a replacement", sibling.Key)
            return sibling, nil
        }
    }
    return nil, log.Errorf("topology_recovery: cannot find candidate sibling of %+v", *intermediateMasterKey)
}
Example 4: TestReadTopologyAndInstanceSlave
func (s *TestSuite) TestReadTopologyAndInstanceSlave(c *C) {
    i, _ := inst.ReadTopologyInstance(&slave1Key)
    iRead, found, _ := inst.ReadInstance(&slave1Key)
    c.Assert(found, Equals, true)
    c.Assert(iRead.Key.Hostname, Equals, i.Key.Hostname)
    c.Assert(iRead.Version, Equals, i.Version)
}
Example 5: getClusterName
func getClusterName(clusterAlias string, instanceKey *inst.InstanceKey) (clusterName string) {
    var err error
    if clusterAlias != "" {
        clusterName, err = inst.ReadClusterByAlias(clusterAlias)
        if err != nil {
            log.Fatale(err)
        }
    } else {
        // deduce cluster by instance
        if instanceKey == nil {
            instanceKey = thisInstanceKey
        }
        if instanceKey == nil {
            log.Fatalf("Unable to get cluster instances: unresolved instance")
        }
        instance, _, err := inst.ReadInstance(instanceKey)
        if err != nil {
            log.Fatale(err)
        }
        if instance == nil {
            log.Fatalf("Instance not found: %+v", *instanceKey)
        }
        clusterName = instance.ClusterName
    }
    if clusterName == "" {
        log.Fatalf("Unable to determine cluster name")
    }
    return clusterName
}
Example 6: TestReadTopologyAndInstanceMaster
func (s *TestSuite) TestReadTopologyAndInstanceMaster(c *C) {
    i, _ := inst.ReadTopologyInstance(&masterKey)
    iRead, found, _ := inst.ReadInstance(&masterKey)
    c.Assert(found, Equals, true)
    c.Assert(iRead.Key.Hostname, Equals, i.Key.Hostname)
    c.Assert(iRead.Version, Equals, i.Version)
    c.Assert(len(iRead.SlaveHosts), Equals, len(i.SlaveHosts))
}
Example 7: validateInstanceIsFound
func validateInstanceIsFound(instanceKey *inst.InstanceKey) (instance *inst.Instance) {
    instance, _, err := inst.ReadInstance(instanceKey)
    if err != nil {
        log.Fatale(err)
    }
    if instance == nil {
        log.Fatalf("Instance not found: %+v", *instanceKey)
    }
    return instance
}
Example 8: discoverInstance
// discoverInstance will attempt discovering an instance (unless it is already up to date) and will
// list down its master and slaves (if any) for further discovery.
func discoverInstance(instanceKey inst.InstanceKey) {
    start := time.Now()
    instanceKey.Formalize()
    if !instanceKey.IsValid() {
        return
    }
    if existsInCacheError := recentDiscoveryOperationKeys.Add(instanceKey.DisplayString(), true, cache.DefaultExpiration); existsInCacheError != nil {
        // Just recently attempted
        return
    }
    instance, found, err := inst.ReadInstance(&instanceKey)
    if found && instance.IsUpToDate && instance.IsLastCheckValid {
        // we've already discovered this one. Skip!
        return
    }
    discoveriesCounter.Inc(1)
    // First we've ever heard of this instance. Continue investigation:
    instance, err = inst.ReadTopologyInstance(&instanceKey)
    // panic can occur (IO stuff). Therefore it may happen
    // that instance is nil. Check it.
    if instance == nil {
        failedDiscoveriesCounter.Inc(1)
        log.Warningf("discoverInstance(%+v) instance is nil in %.3fs, error=%+v", instanceKey, time.Since(start).Seconds(), err)
        return
    }
    log.Debugf("Discovered host: %+v, master: %+v, version: %+v in %.3fs", instance.Key, instance.MasterKey, instance.Version, time.Since(start).Seconds())
    if atomic.LoadInt64(&isElectedNode) == 0 {
        // Maybe this node was elected before, but isn't elected anymore.
        // If not elected, stop drilling up/down the topology
        return
    }
    // Investigate slaves:
    for _, slaveKey := range instance.SlaveHosts.GetInstanceKeys() {
        slaveKey := slaveKey
        if slaveKey.IsValid() {
            discoveryQueue.Push(slaveKey)
        }
    }
    // Investigate master:
    if instance.MasterKey.IsValid() {
        discoveryQueue.Push(instance.MasterKey)
    }
}
Example 9: discoverInstance
// discoverInstance will attempt discovering an instance (unless it is already up to date) and will
// list down its master and slaves (if any) for further discovery.
func discoverInstance(instanceKey inst.InstanceKey) {
    instanceKey.Formalize()
    if !instanceKey.IsValid() {
        return
    }
    if existsInCacheError := recentDiscoveryOperationKeys.Add(instanceKey.DisplayString(), true, cache.DefaultExpiration); existsInCacheError != nil {
        // Just recently attempted
        return
    }
    instance, found, err := inst.ReadInstance(&instanceKey)
    if found && instance.IsUpToDate && instance.IsLastCheckValid {
        // we've already discovered this one. Skip!
        return
    }
    discoveriesCounter.Inc(1)
    // First we've ever heard of this instance. Continue investigation:
    instance, err = inst.ReadTopologyInstance(&instanceKey)
    // panic can occur (IO stuff). Therefore it may happen
    // that instance is nil. Check it.
    if instance == nil {
        failedDiscoveriesCounter.Inc(1)
        log.Warningf("instance is nil in discoverInstance. key=%+v, error=%+v", instanceKey, err)
        return
    }
    log.Debugf("Discovered host: %+v, master: %+v", instance.Key, instance.MasterKey)
    if !isElectedNode {
        // Maybe this node was elected before, but isn't elected anymore.
        // If not elected, stop drilling down to further investigate slaves.
        return
    }
    // Investigate slaves:
    for _, slaveKey := range instance.SlaveHosts.GetInstanceKeys() {
        discoveryInstanceKeys <- slaveKey
    }
    // Investigate master:
    discoveryInstanceKeys <- instance.MasterKey
}
Example 10: ClusterByInstance
func (this *HttpWeb) ClusterByInstance(params martini.Params, r render.Render, req *http.Request, user auth.User) {
    instanceKey, err := this.getInstanceKey(params["host"], params["port"])
    if err != nil {
        r.JSON(200, &APIResponse{Code: ERROR, Message: err.Error()})
        return
    }
    instance, found, err := inst.ReadInstance(&instanceKey)
    if (!found) || (err != nil) {
        r.JSON(200, &APIResponse{Code: ERROR, Message: fmt.Sprintf("Cannot read instance: %+v", instanceKey)})
        return
    }
    // Willing to accept the case of multiple clusters; we just present one
    if instance.ClusterName == "" && err != nil {
        r.JSON(200, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)})
        return
    }
    params["clusterName"] = instance.ClusterName
    this.Cluster(params, r, req, user)
}
Example 11: TestMakeCoMasterAndBackAndFailOthersToBecomeCoMasters
func (s *TestSuite) TestMakeCoMasterAndBackAndFailOthersToBecomeCoMasters(c *C) {
    clearTestMaintenance()
    slave1, err := inst.MakeCoMaster(&slave1Key)
    c.Assert(err, IsNil)
    // Now master & slave1 expected to be co-masters. Check!
    master, _, _ := inst.ReadInstance(&masterKey)
    c.Assert(master.IsSlaveOf(slave1), Equals, true)
    c.Assert(slave1.IsSlaveOf(master), Equals, true)
    // Verify can't have additional co-masters
    _, err = inst.MakeCoMaster(&masterKey)
    c.Assert(err, Not(IsNil))
    _, err = inst.MakeCoMaster(&slave1Key)
    c.Assert(err, Not(IsNil))
    _, err = inst.MakeCoMaster(&slave2Key)
    c.Assert(err, Not(IsNil))
    // reset slave - restore to original state
    master, err = inst.ResetSlaveOperation(&masterKey)
    c.Assert(err, IsNil)
    c.Assert(master.MasterKey.Hostname, Equals, "_")
}
Example 12: Cli
//......... part of the code omitted here .........
                log.Fatale(err)
            } else {
                for _, instance := range instances {
                    fmt.Println(instance.Key.DisplayString())
                }
            }
        }
    case registerCliCommand("clusters", "Information", `List all clusters known to orchestrator`):
        {
            clusters, err := inst.ReadClusters()
            if err != nil {
                log.Fatale(err)
            } else {
                fmt.Println(strings.Join(clusters, "\n"))
            }
        }
    case registerCliCommand("topology", "Information", `Show an ascii-graph of a replication topology, given a member of that topology`):
        {
            instanceKey = deduceInstanceKeyIfNeeded(instance, instanceKey)
            output, err := inst.ASCIITopology(instanceKey, pattern)
            if err != nil {
                log.Fatale(err)
            }
            fmt.Println(output)
        }
    case registerCliCommand("which-instance", "Information", `Output the fully-qualified hostname:port representation of the given instance, or error if unknown`):
        {
            if instanceKey == nil {
                instanceKey = assignThisInstanceKey()
            }
            if instanceKey == nil {
                log.Fatalf("Unable to get master: unresolved instance")
            }
            instance, _, err := inst.ReadInstance(instanceKey)
            if err != nil {
                log.Fatale(err)
            }
            if instance == nil {
                log.Fatalf("Instance not found: %+v", *instanceKey)
            }
            fmt.Println(instance.Key.DisplayString())
        }
    case registerCliCommand("which-cluster", "Information", `Output the name of the cluster an instance belongs to, or error if unknown to orchestrator`):
        {
            clusterName := getClusterName(clusterAlias, instanceKey)
            fmt.Println(clusterName)
        }
    case registerCliCommand("which-cluster-instances", "Information", `Output the list of instances participating in same cluster as given instance`):
        {
            clusterName := getClusterName(clusterAlias, instanceKey)
            instances, err := inst.ReadClusterInstances(clusterName)
            if err != nil {
                log.Fatale(err)
            }
            for _, clusterInstance := range instances {
                fmt.Println(clusterInstance.Key.DisplayString())
            }
        }
    case registerCliCommand("which-cluster-osc-slaves", "Information", `Output a list of slaves in same cluster as given instance, that could serve as a pt-online-schema-change operation control slaves`):
        {
            clusterName := getClusterName(clusterAlias, instanceKey)
            instances, err := inst.GetClusterOSCSlaves(clusterName)
            if err != nil {
                log.Fatale(err)
            }
            for _, clusterInstance := range instances {
Example 13: Cli
//......... part of the code omitted here .........
                log.Fatal("No pattern given")
            }
            instances, err := inst.FindInstances(pattern)
            if err != nil {
                log.Fatale(err)
            } else {
                for _, instance := range instances {
                    fmt.Println(instance.Key.DisplayString())
                }
            }
        }
    case cliCommand("topology"):
        {
            if instanceKey == nil {
                instanceKey = thisInstanceKey
            }
            if instanceKey == nil {
                log.Fatal("Cannot deduce instance:", instance)
            }
            output, err := inst.ASCIITopology(instanceKey, pattern)
            if err != nil {
                log.Fatale(err)
            }
            fmt.Println(output)
        }
    case cliCommand("which-instance"):
        {
            if instanceKey == nil {
                instanceKey = thisInstanceKey
            }
            if instanceKey == nil {
                log.Fatalf("Unable to get master: unresolved instance")
            }
            instance, _, err := inst.ReadInstance(instanceKey)
            if err != nil {
                log.Fatale(err)
            }
            if instance == nil {
                log.Fatalf("Instance not found: %+v", *instanceKey)
            }
            fmt.Println(instance.Key.DisplayString())
        }
    case cliCommand("which-master"):
        {
            if instanceKey == nil {
                instanceKey = thisInstanceKey
            }
            if instanceKey == nil {
                log.Fatalf("Unable to get master: unresolved instance")
            }
            instance, _, err := inst.ReadInstance(instanceKey)
            if err != nil {
                log.Fatale(err)
            }
            if instance == nil {
                log.Fatalf("Instance not found: %+v", *instanceKey)
            }
            fmt.Println(instance.MasterKey.DisplayString())
        }
    case cliCommand("which-cluster"):
        {
            clusterName := getClusterName(clusterAlias, instanceKey)
            fmt.Println(clusterName)
        }
    case cliCommand("which-cluster-instances"):
        {
Example 14: RecoverDeadCoMaster
// RecoverDeadCoMaster recovers a dead co-master, complete logic inside
func RecoverDeadCoMaster(topologyRecovery *TopologyRecovery, skipProcesses bool) (promotedSlave *inst.Instance, lostSlaves [](*inst.Instance), err error) {
    analysisEntry := &topologyRecovery.AnalysisEntry
    failedInstanceKey := &analysisEntry.AnalyzedInstanceKey
    otherCoMasterKey := &analysisEntry.AnalyzedInstanceMasterKey
    otherCoMaster, found, _ := inst.ReadInstance(otherCoMasterKey)
    if otherCoMaster == nil || !found {
        return nil, lostSlaves, topologyRecovery.AddError(log.Errorf("RecoverDeadCoMaster: could not read info for co-master %+v of %+v", *otherCoMasterKey, *failedInstanceKey))
    }
    inst.AuditOperation("recover-dead-co-master", failedInstanceKey, "problem found; will recover")
    if !skipProcesses {
        if err := executeProcesses(config.Config.PreFailoverProcesses, "PreFailoverProcesses", topologyRecovery, true); err != nil {
            return nil, lostSlaves, topologyRecovery.AddError(err)
        }
    }
    log.Debugf("topology_recovery: RecoverDeadCoMaster: will recover %+v", *failedInstanceKey)
    var coMasterRecoveryType MasterRecoveryType = MasterRecoveryPseudoGTID
    if analysisEntry.OracleGTIDImmediateTopology || analysisEntry.MariaDBGTIDImmediateTopology {
        coMasterRecoveryType = MasterRecoveryGTID
    }
    log.Debugf("topology_recovery: RecoverDeadCoMaster: coMasterRecoveryType=%+v", coMasterRecoveryType)
    switch coMasterRecoveryType {
    case MasterRecoveryGTID:
        {
            lostSlaves, _, promotedSlave, err = inst.RegroupSlavesGTID(failedInstanceKey, true, nil)
        }
    case MasterRecoveryPseudoGTID:
        {
            lostSlaves, _, _, promotedSlave, err = inst.RegroupSlavesPseudoGTIDIncludingSubSlavesOfBinlogServers(failedInstanceKey, true, nil, &topologyRecovery.PostponedFunctionsContainer)
        }
    }
    topologyRecovery.AddError(err)
    mustPromoteOtherCoMaster := config.Config.CoMasterRecoveryMustPromoteOtherCoMaster
    if !otherCoMaster.ReadOnly {
        log.Debugf("topology_recovery: RecoverDeadCoMaster: other co-master %+v is writeable hence has to be promoted", otherCoMaster.Key)
        mustPromoteOtherCoMaster = true
    }
    log.Debugf("topology_recovery: RecoverDeadCoMaster: mustPromoteOtherCoMaster? %+v", mustPromoteOtherCoMaster)
    if promotedSlave != nil {
        topologyRecovery.ParticipatingInstanceKeys.AddKey(promotedSlave.Key)
        if mustPromoteOtherCoMaster {
            log.Debugf("topology_recovery: mustPromoteOtherCoMaster. Verifying that %+v is/can be promoted", *otherCoMasterKey)
            promotedSlave, err = replacePromotedSlaveWithCandidate(failedInstanceKey, promotedSlave, otherCoMasterKey)
        } else {
            // We are allowed to promote any server
            promotedSlave, err = replacePromotedSlaveWithCandidate(failedInstanceKey, promotedSlave, nil)
            if promotedSlave.DataCenter == otherCoMaster.DataCenter &&
                promotedSlave.PhysicalEnvironment == otherCoMaster.PhysicalEnvironment && false {
                // and _still_ we prefer to promote the co-master! They're in same env & DC so no worries about geo issues!
                promotedSlave, err = replacePromotedSlaveWithCandidate(failedInstanceKey, promotedSlave, otherCoMasterKey)
            }
        }
        topologyRecovery.AddError(err)
    }
    if promotedSlave != nil {
        if mustPromoteOtherCoMaster && !promotedSlave.Key.Equals(otherCoMasterKey) {
            topologyRecovery.AddError(log.Errorf("RecoverDeadCoMaster: could not manage to promote other-co-master %+v; was only able to promote %+v; CoMasterRecoveryMustPromoteOtherCoMaster is true, therefore failing", *otherCoMasterKey, promotedSlave.Key))
            promotedSlave = nil
        }
    }
    if promotedSlave != nil {
        topologyRecovery.ParticipatingInstanceKeys.AddKey(promotedSlave.Key)
    }
    // OK, we may have someone promoted. Either this was the other co-master or another slave.
    // Noting down that we DO NOT attempt to set a new co-master topology. We are good with remaining with a single master.
    // I tried solving the "let's promote a slave and create a new co-master setup" but this turns so complex due to various factors.
    // I see this as risky and not worth the questionable benefit.
    // Maybe future me is a smarter person and finds a simple solution. Unlikely. I'm getting dumber.
    //
    // ...
    // Now that we're convinced, take a look at what we can be left with:
    // Say we started with M1<->M2<-S1, with M2 failing, and we promoted S1.
    // We now have M1->S1 (because S1 is promoted), S1->M2 (because that's what it remembers), M2->M1 (because that's what it remembers)
    // !! This is an evil 3-node circle that must be broken.
    // config.Config.ApplyMySQLPromotionAfterMasterFailover, if true, will cause it to break, because we would RESET SLAVE on S1
    // but we want to make sure the circle is broken no matter what.
    // So in the case we promoted not-the-other-co-master, we issue a detach-slave-master-host, which is a reversible operation
    if promotedSlave != nil && !promotedSlave.Key.Equals(otherCoMasterKey) {
        _, err = inst.DetachSlaveMasterHost(&promotedSlave.Key)
        topologyRecovery.AddError(log.Errore(err))
    }
    if promotedSlave != nil && len(lostSlaves) > 0 && config.Config.DetachLostSlavesAfterMasterFailover {
        postponedFunction := func() error {
            log.Debugf("topology_recovery: - RecoverDeadCoMaster: lost %+v slaves during recovery process; detaching them", len(lostSlaves))
            for _, slave := range lostSlaves {
                slave := slave
                inst.DetachSlaveOperation(&slave.Key)
            }
            return nil
        }
        topologyRecovery.AddPostponedFunction(postponedFunction)
//......... part of the code omitted here .........
Example 15: RecoverDeadIntermediateMaster
// RecoverDeadIntermediateMaster performs intermediate master recovery; complete logic inside
func RecoverDeadIntermediateMaster(topologyRecovery *TopologyRecovery, skipProcesses bool) (successorInstance *inst.Instance, err error) {
    analysisEntry := &topologyRecovery.AnalysisEntry
    failedInstanceKey := &analysisEntry.AnalyzedInstanceKey
    recoveryResolved := false
    inst.AuditOperation("recover-dead-intermediate-master", failedInstanceKey, "problem found; will recover")
    if !skipProcesses {
        if err := executeProcesses(config.Config.PreFailoverProcesses, "PreFailoverProcesses", topologyRecovery, true); err != nil {
            return nil, topologyRecovery.AddError(err)
        }
    }
    intermediateMasterInstance, _, err := inst.ReadInstance(failedInstanceKey)
    if err != nil {
        return nil, topologyRecovery.AddError(err)
    }
    // Find possible candidate
    candidateSiblingOfIntermediateMaster, err := GetCandidateSiblingOfIntermediateMaster(intermediateMasterInstance)
    relocateSlavesToCandidateSibling := func() {
        if candidateSiblingOfIntermediateMaster == nil {
            return
        }
        // We have a candidate
        log.Debugf("topology_recovery: - RecoverDeadIntermediateMaster: will attempt a candidate intermediate master: %+v", candidateSiblingOfIntermediateMaster.Key)
        relocatedSlaves, candidateSibling, err, errs := inst.RelocateSlaves(failedInstanceKey, &candidateSiblingOfIntermediateMaster.Key, "")
        topologyRecovery.AddErrors(errs)
        topologyRecovery.ParticipatingInstanceKeys.AddKey(candidateSiblingOfIntermediateMaster.Key)
        if len(relocatedSlaves) == 0 {
            log.Debugf("topology_recovery: - RecoverDeadIntermediateMaster: failed to move any slave to candidate intermediate master (%+v)", candidateSibling.Key)
            return
        }
        if err != nil || len(errs) > 0 {
            log.Debugf("topology_recovery: - RecoverDeadIntermediateMaster: move to candidate intermediate master (%+v) did not complete: %+v", candidateSibling.Key, err)
            return
        }
        if err == nil {
            recoveryResolved = true
            successorInstance = candidateSibling
            inst.AuditOperation("recover-dead-intermediate-master", failedInstanceKey, fmt.Sprintf("Relocated %d slaves under candidate sibling: %+v; %d errors: %+v", len(relocatedSlaves), candidateSibling.Key, len(errs), errs))
        }
    }
    // Plan A: find a replacement intermediate master in same Data Center
    if candidateSiblingOfIntermediateMaster != nil && candidateSiblingOfIntermediateMaster.DataCenter == intermediateMasterInstance.DataCenter {
        relocateSlavesToCandidateSibling()
    }
    if !recoveryResolved {
        log.Debugf("topology_recovery: - RecoverDeadIntermediateMaster: will next attempt regrouping of slaves")
        // Plan B: regroup (we wish to reduce cross-DC replication streams)
        _, _, _, regroupPromotedSlave, err := inst.RegroupSlaves(failedInstanceKey, true, nil, nil)
        if err != nil {
            topologyRecovery.AddError(err)
            log.Debugf("topology_recovery: - RecoverDeadIntermediateMaster: regroup failed on: %+v", err)
        }
        if regroupPromotedSlave != nil {
            topologyRecovery.ParticipatingInstanceKeys.AddKey(regroupPromotedSlave.Key)
        }
        // Plan C: try replacement intermediate master in other DC...
        if candidateSiblingOfIntermediateMaster != nil && candidateSiblingOfIntermediateMaster.DataCenter != intermediateMasterInstance.DataCenter {
            log.Debugf("topology_recovery: - RecoverDeadIntermediateMaster: will next attempt relocating to another DC server")
            relocateSlavesToCandidateSibling()
        }
    }
    if !recoveryResolved {
        // Do we still have leftovers? Some slaves couldn't move? Couldn't regroup? Only left with regroup's resulting leader?
        // nothing moved?
        // We don't care much if regroup made it or not. We prefer that it made it, in which case we only need to relocate up
        // one slave, but the operation is still valid if regroup partially/completely failed. We just promote anything
        // not regrouped.
        // So, match up all that's left, plan D
        log.Debugf("topology_recovery: - RecoverDeadIntermediateMaster: will next attempt to relocate up from %+v", *failedInstanceKey)
        var errs []error
        var relocatedSlaves [](*inst.Instance)
        relocatedSlaves, successorInstance, err, errs = inst.RelocateSlaves(failedInstanceKey, &analysisEntry.AnalyzedInstanceMasterKey, "")
        topologyRecovery.AddErrors(errs)
        topologyRecovery.ParticipatingInstanceKeys.AddKey(analysisEntry.AnalyzedInstanceMasterKey)
        if len(relocatedSlaves) > 0 {
            recoveryResolved = true
            inst.AuditOperation("recover-dead-intermediate-master", failedInstanceKey, fmt.Sprintf("Relocated slaves under: %+v %d errors: %+v", successorInstance.Key, len(errs), errs))
        } else {
            err = log.Errorf("topology_recovery: RecoverDeadIntermediateMaster failed to match up any slave from %+v", *failedInstanceKey)
            topologyRecovery.AddError(err)
        }
    }
    if !recoveryResolved {
        successorInstance = nil
    }
    ResolveRecovery(topologyRecovery, successorInstance)
    return successorInstance, err
}