This article collects and summarizes typical usage examples of the Event.Logf method from the Golang package github.com/tsuru/tsuru/event. If you are wondering what Event.Logf does, how to call it, or what real-world uses of Event.Logf look like, the curated examples below should help. You can also explore further usage examples of the containing type, github.com/tsuru/tsuru/event.Event.
Three code examples of the Event.Logf method are shown below, sorted by popularity by default. You can upvote any example you like or find useful; your votes help the system recommend better Golang code examples.
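Before the examples, here is a minimal sketch of the two ways an *event.Event is used below. The Logf signature (a printf-style format string plus variadic arguments) and the fact that *event.Event satisfies io.Writer are both inferred from the examples themselves; the helper name logProgress and its body are hypothetical, and obtaining the event itself is assumed to happen elsewhere.

import (
    "fmt"
    "io"

    "github.com/tsuru/tsuru/event"
    "github.com/tsuru/tsuru/safe"
)

// logProgress is a hypothetical helper showing the two usage styles of
// *event.Event seen in the examples below: as a printf-style logger via
// Logf, and as an io.Writer for streaming output.
func logProgress(evt *event.Event, pool string) {
    // Formatted log line recorded on the event.
    evt.Logf("starting maintenance for pool %q", pool)

    // *event.Event satisfies io.Writer (see io.MultiWriter in Example 1):
    // combine it with an in-memory buffer so output is both attached to
    // the event and kept around for error reporting.
    buf := safe.NewBuffer(nil)
    w := io.MultiWriter(buf, evt)
    fmt.Fprintf(w, "working on pool %s\n", pool)
}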
Example 1: rebalanceIfNeeded
func (a *autoScaleConfig) rebalanceIfNeeded(evt *event.Event, pool string, nodes []*cluster.Node, sResult *scalerResult) error {
    if len(sResult.ToRemove) > 0 {
        return nil
    }
    if sResult.ToAdd > 0 {
        sResult.ToRebalance = true
    }
    rebalanceFilter := map[string]string{poolMetadataName: pool}
    if !sResult.ToRebalance {
        // No action decided yet; run a dry rebalance to check whether a
        // real one would reduce the container gap between nodes.
        _, gap, err := a.provisioner.containerGapInNodes(nodes)
        if err != nil {
            return errors.Wrapf(err, "unable to obtain container gap in nodes")
        }
        buf := safe.NewBuffer(nil)
        dryProvisioner, err := a.provisioner.rebalanceContainersByFilter(buf, nil, rebalanceFilter, true)
        if err != nil {
            return errors.Wrapf(err, "unable to run dry rebalance to check if rebalance is needed. log: %s", buf.String())
        }
        if dryProvisioner == nil {
            return nil
        }
        _, gapAfter, err := dryProvisioner.containerGapInNodes(nodes)
        if err != nil {
            return errors.Wrap(err, "couldn't find containers from rebalanced nodes")
        }
        if math.Abs(float64(gap-gapAfter)) > 2.0 {
            sResult.ToRebalance = true
            if sResult.Reason == "" {
                sResult.Reason = fmt.Sprintf("gap is %d, after rebalance gap will be %d", gap, gapAfter)
            }
        }
    }
    if sResult.ToRebalance {
        evt.Logf("running rebalance, for %q: %#v", pool, sResult)
        buf := safe.NewBuffer(nil)
        // Stream rebalance output both to the event and to a buffer so the
        // log can be attached to the error if the rebalance fails.
        writer := io.MultiWriter(buf, evt)
        _, err := a.provisioner.rebalanceContainersByFilter(writer, nil, rebalanceFilter, false)
        if err != nil {
            return errors.Wrapf(err, "unable to rebalance containers. log: %s", buf.String())
        }
    }
    return nil
}
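The error-handling shape in this example is worth extracting: output is streamed through io.MultiWriter to both the event and an in-memory buffer, and on failure the buffered log is embedded in the wrapped error. Below is a hedged sketch of that pattern, where the wrapper name runWithEventLog is hypothetical and op stands in for any streaming operation such as rebalanceContainersByFilter; errors is github.com/pkg/errors, and the remaining imports match the first sketch above.

// runWithEventLog is a hypothetical wrapper extracting the pattern above:
// stream op's output to both the event and a buffer, and attach the
// captured log to the wrapped error when op fails.
func runWithEventLog(evt *event.Event, op func(io.Writer) error) error {
    buf := safe.NewBuffer(nil)
    writer := io.MultiWriter(buf, evt)
    if err := op(writer); err != nil {
        return errors.Wrapf(err, "operation failed. log: %s", buf.String())
    }
    return nil
}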
Example 2: removeMultipleNodes
func (a *autoScaleConfig) removeMultipleNodes(evt *event.Event, chosenNodes []cluster.Node) error {
    nodeAddrs := make([]string, len(chosenNodes))
    nodeHosts := make([]string, len(chosenNodes))
    for i, node := range chosenNodes {
        _, hasIaas := node.Metadata["iaas"]
        if !hasIaas {
            return errors.Errorf("no IaaS information in node (%s) metadata: %#v", node.Address, node.Metadata)
        }
        nodeAddrs[i] = node.Address
        nodeHosts[i] = net.URLToHost(node.Address)
    }
    err := a.provisioner.Cluster().UnregisterNodes(nodeAddrs...)
    if err != nil {
        return errors.Wrapf(err, "unable to unregister nodes (%s) for removal", strings.Join(nodeAddrs, ", "))
    }
    buf := safe.NewBuffer(nil)
    err = a.provisioner.moveContainersFromHosts(nodeHosts, "", buf)
    if err != nil {
        // Moving containers failed: re-register the nodes so the cluster
        // is restored to its previous state before reporting the error.
        for _, node := range chosenNodes {
            a.provisioner.Cluster().Register(node)
        }
        return errors.Wrapf(err, "unable to move containers from nodes (%s). log: %s", strings.Join(nodeAddrs, ", "), buf.String())
    }
    // Destroy the underlying IaaS machines concurrently; failures here are
    // logged on the event but do not abort the removal.
    wg := sync.WaitGroup{}
    for i := range chosenNodes {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            node := chosenNodes[i]
            m, err := iaas.FindMachineByIdOrAddress(node.Metadata["iaas-id"], net.URLToHost(node.Address))
            if err != nil {
                evt.Logf("unable to find machine for removal in iaas: %s", err)
                return
            }
            err = m.Destroy()
            if err != nil {
                evt.Logf("unable to destroy machine in IaaS: %s", err)
            }
        }(i)
    }
    wg.Wait()
    return nil
}
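Note the role Logf plays inside the goroutines above: per-machine IaaS failures are recorded on the event but do not abort the batch, since the nodes are already unregistered. The original calls Logf concurrently from several goroutines, which suggests it is safe for concurrent use. Here is a condensed sketch of that fan-out shape; destroyAll and the destroy callback are hypothetical stand-ins, with imports as in the first sketch plus sync.

// destroyAll fans work out across goroutines and reports per-item
// failures through evt.Logf instead of returning an error, mirroring
// removeMultipleNodes above. destroy is a hypothetical callback.
func destroyAll(evt *event.Event, addrs []string, destroy func(string) error) {
    var wg sync.WaitGroup
    for _, addr := range addrs {
        wg.Add(1)
        go func(addr string) {
            defer wg.Done()
            if err := destroy(addr); err != nil {
                evt.Logf("unable to destroy machine %s: %s", addr, err)
            }
        }(addr)
    }
    wg.Wait()
}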
Example 3: addNode
func (a *autoScaleConfig) addNode(evt *event.Event, modelNodes []*cluster.Node) (*cluster.Node, error) {
    metadata, err := chooseMetadataFromNodes(modelNodes)
    if err != nil {
        return nil, err
    }
    _, hasIaas := metadata["iaas"]
    if !hasIaas {
        return nil, errors.Errorf("no IaaS information in nodes metadata: %#v", metadata)
    }
    machine, err := iaas.CreateMachineForIaaS(metadata["iaas"], metadata)
    if err != nil {
        return nil, errors.Wrap(err, "unable to create machine")
    }
    newAddr := machine.FormatNodeAddress()
    evt.Logf("new machine created: %s - Waiting for docker to start...", newAddr)
    createOpts := provision.AddNodeOptions{
        Address:    newAddr,
        Metadata:   metadata,
        WaitTO:     a.WaitTimeNewMachine,
        CaCert:     machine.CaCert,
        ClientCert: machine.ClientCert,
        ClientKey:  machine.ClientKey,
    }
    err = a.provisioner.AddNode(createOpts)
    if err != nil {
        // Adding the node failed: destroy the machine and make sure its
        // address is not left registered in the cluster.
        machine.Destroy()
        a.provisioner.Cluster().Unregister(newAddr)
        return nil, errors.Wrapf(err, "error adding new node %s", newAddr)
    }
    createdNode, err := a.provisioner.Cluster().GetNode(newAddr)
    if err != nil {
        machine.Destroy()
        a.provisioner.Cluster().Unregister(newAddr)
        return nil, errors.Wrapf(err, "error retrieving new node %s", newAddr)
    }
    evt.Logf("new machine created: %s - started!", newAddr)
    return &createdNode, nil
}
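A final pattern to note: Logf brackets the slow provisioning step, so the event carries a progress trail even if the operation later fails, and both failure paths run the same cleanup (machine.Destroy plus Unregister). A minimal sketch of that bracketing shape, with bracketed, provision, and cleanup as hypothetical stand-ins and imports as in the sketches above:

// bracketed logs progress on the event before and after a slow step and
// runs identical cleanup on the failure path, mirroring addNode above.
func bracketed(evt *event.Event, addr string, provision func() error, cleanup func()) error {
    evt.Logf("provisioning %s - waiting...", addr)
    if err := provision(); err != nil {
        cleanup() // same cleanup on every failure path
        return errors.Wrapf(err, "error adding new node %s", addr)
    }
    evt.Logf("provisioning %s - done", addr)
    return nil
}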