This page collects typical usage examples of the Golang function Debugf from the github.com/tsuru/tsuru/log package. If you are wondering what Golang's Debugf does, how to call it, or what real-world uses look like, the curated examples below should help.
The following 15 code examples of Debugf are shown, sorted by popularity by default.
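Before the extracted examples, here is a minimal sketch of typical usage. It assumes only what the examples below demonstrate: Debugf takes a fmt.Printf-style format string and variadic arguments; the behavior note in the comment (debug-level gating by the configured logger) is an assumption about the package, not shown on this page.

package main

import "github.com/tsuru/tsuru/log"

func main() {
	// Debugf uses fmt.Printf-style verbs; assumed: messages are only
	// written when the process's logger has debug output enabled.
	log.Debugf("starting %d workers for queue %q", 4, "default")
}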
Example 1: RecreateContainers
// RecreateContainers relaunch all bs containers in the cluster for the given
// DockerProvisioner, logging progress to the given writer.
//
// It assumes that the given writer is thread safe.
func RecreateContainers(p DockerProvisioner, w io.Writer) error {
	cluster := p.Cluster()
	nodes, err := cluster.UnfilteredNodes()
	if err != nil {
		return err
	}
	errChan := make(chan error, len(nodes))
	wg := sync.WaitGroup{}
	log.Debugf("[bs containers] recreating %d containers", len(nodes))
	for i := range nodes {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			node := &nodes[i]
			pool := node.Metadata["pool"]
			log.Debugf("[bs containers] recreating container in %s [%s]", node.Address, pool)
			fmt.Fprintf(w, "relaunching bs container in the node %s [%s]\n", node.Address, pool)
			err := createContainer(node.Address, pool, p, true)
			if err != nil {
				msg := fmt.Sprintf("[bs containers] failed to create container in %s [%s]: %s", node.Address, pool, err)
				log.Error(msg)
				err = errors.New(msg)
				errChan <- err
			}
		}(i)
	}
	wg.Wait()
	close(errChan)
	return <-errChan
}
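A note on the pattern above: errChan is buffered with capacity len(nodes), so no goroutine ever blocks on send, and receiving from the closed channel at the end yields the first error sent, or nil when the channel drained empty. Only one error is surfaced; later examples on this page (7 and 10) evolve the same pattern to collect every error into a multi-error.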
Example 2: chooseNode
// chooseNode finds the node with the fewest containers and returns it.
func (segregatedScheduler) chooseNode(nodes []node, contName string) (node, error) {
	var chosenNode node
	hosts := make([]string, len(nodes))
	hostsMap := make(map[string]node)
	// Only the hostname is saved in the docker containers collection,
	// so we need to extract it and map it back to the original node.
	for i, node := range nodes {
		host := urlToHost(node.Address)
		hosts[i] = host
		hostsMap[host] = node
	}
	log.Debugf("[scheduler] Possible nodes for container %s: %#v", contName, hosts)
	hostMutex.Lock()
	defer hostMutex.Unlock()
	countMap, err := aggregateNodesByHost(hosts)
	if err != nil {
		return chosenNode, err
	}
	// Finally, find the host with the minimum number of containers.
	var minHost string
	minCount := math.MaxInt32
	for _, host := range hosts {
		count := countMap[host]
		if count < minCount {
			minCount = count
			minHost = host
		}
	}
	chosenNode = hostsMap[minHost]
	log.Debugf("[scheduler] Chosen node for container %s: %#v Count: %d", contName, chosenNode, minCount)
	coll := collection()
	defer coll.Close()
	err = coll.Update(bson.M{"name": contName}, bson.M{"$set": bson.M{"hostaddr": minHost}})
	return chosenNode, err
}
Example 3: chooseNode
// chooseNode finds the node with the fewest containers and returns it.
func (s *segregatedScheduler) chooseNode(nodes []cluster.Node, contName string, appName, process string) (string, error) {
	var chosenNode string
	hosts, hostsMap := s.nodesToHosts(nodes)
	log.Debugf("[scheduler] Possible nodes for container %s: %#v", contName, hosts)
	s.hostMutex.Lock()
	defer s.hostMutex.Unlock()
	hostCountMap, err := s.aggregateContainersByHost(hosts)
	if err != nil {
		return chosenNode, err
	}
	appCountMap, err := s.aggregateContainersByHostAppProcess(hosts, appName, process)
	if err != nil {
		return chosenNode, err
	}
	// Finally, find the host with the minimum value for
	// the pair [appCount, hostCount].
	var minHost string
	minCount := math.MaxInt32
	for _, host := range hosts {
		adjCount := appCountMap[host]*10000 + hostCountMap[host]
		if adjCount < minCount {
			minCount = adjCount
			minHost = host
		}
	}
	chosenNode = hostsMap[minHost]
	log.Debugf("[scheduler] Chosen node for container %s: %#v Count: %d", contName, chosenNode, minCount)
	if contName != "" {
		coll := s.provisioner.collection()
		defer coll.Close()
		err = coll.Update(bson.M{"name": contName}, bson.M{"$set": bson.M{"hostaddr": minHost}})
	}
	return chosenNode, err
}
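The adjCount = appCount*10000 + hostCount weighting is effectively a lexicographic comparison of the pair [appCount, hostCount]: a host running fewer containers of this app/process always wins, and the raw per-host container count only breaks ties. This holds under the implicit assumption that no host carries 10000 or more containers.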
Example 4: Execute
// Execute executes the pipeline.
//
// The execution starts in the forward phase, calling the Forward function of
// all actions. If none of the Forward calls returns an error, the pipeline
// execution ends in the forward phase and is "committed".
//
// If any of the Forward calls fails, the executor switches to the backward
// phase (rollback) and calls the Backward function for each completed action.
// It does not call the Backward function of the action that failed.
//
// After rolling back all completed actions, it returns the original error
// returned by the action that failed.
func (p *Pipeline) Execute(params ...interface{}) error {
	var (
		r   Result
		err error
	)
	if len(p.actions) == 0 {
		return ErrPipelineNoActions
	}
	fwCtx := FWContext{Params: params}
	for i, a := range p.actions {
		log.Debugf("[pipeline] running the Forward for the %s action", a.Name)
		if a.Forward == nil {
			err = ErrPipelineForwardMissing
		} else if len(fwCtx.Params) < a.MinParams {
			err = ErrPipelineFewParameters
		} else {
			r, err = a.Forward(fwCtx)
			a.rMutex.Lock()
			a.result = r
			a.rMutex.Unlock()
			fwCtx.Previous = r
		}
		if err != nil {
			log.Debugf("[pipeline] error running the Forward for the %s action - %s", a.Name, err)
			if a.OnError != nil {
				a.OnError(fwCtx, err)
			}
			p.rollback(i-1, params)
			return err
		}
	}
	return nil
}
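To make the doc comment above concrete, here is a hypothetical two-action pipeline sketch. The Action fields (Name, MinParams, Forward, OnError) and the FWContext type appear in the example itself; the NewPipeline constructor, the BWContext type, and the action names are assumptions for illustration, based on how such an action package is typically laid out.

// saveRecord and notify are illustrative names, not part of tsuru.
var saveRecord = &Action{
	Name:      "save-record",
	MinParams: 1,
	Forward: func(ctx FWContext) (Result, error) {
		name := ctx.Params[0].(string)
		// persist something and hand the result to the next action
		return name, nil
	},
	Backward: func(ctx BWContext) {
		// undo whatever Forward did; assumed: ctx.FWResult holds its result
	},
}

var notify = &Action{
	Name:      "notify",
	MinParams: 1,
	Forward: func(ctx FWContext) (Result, error) {
		// ctx.Previous carries the previous action's Result
		return nil, nil
	},
	Backward: func(ctx BWContext) {},
}

func runPipeline() error {
	// If notify.Forward failed, saveRecord.Backward would run before
	// Execute returns the original error.
	pipeline := NewPipeline(saveRecord, notify)
	return pipeline.Execute("example")
}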
Example 5: chooseContainerFromMaxContainersCountInNode
// chooseContainerFromMaxContainersCountInNode finds the node with the
// most containers and returns the ID of a container running on it.
func (s *segregatedScheduler) chooseContainerFromMaxContainersCountInNode(nodes []cluster.Node, appName, process string) (string, error) {
	hosts, hostsMap := s.nodesToHosts(nodes)
	log.Debugf("[scheduler] Possible nodes for removing a container: %#v", hosts)
	s.hostMutex.Lock()
	defer s.hostMutex.Unlock()
	hostCountMap, err := s.aggregateContainersByHost(hosts)
	if err != nil {
		return "", err
	}
	appCountMap, err := s.aggregateContainersByHostAppProcess(hosts, appName, process)
	if err != nil {
		return "", err
	}
	// Finally, find the host with the maximum value for
	// the pair [appCount, hostCount].
	var maxHost string
	maxCount := 0
	for _, host := range hosts {
		adjCount := appCountMap[host]*10000 + hostCountMap[host]
		if adjCount > maxCount {
			maxCount = adjCount
			maxHost = host
		}
	}
	chosenNode := hostsMap[maxHost]
	log.Debugf("[scheduler] Chosen node for removing a container: %#v Count: %d", chosenNode, hostCountMap[maxHost])
	containerID, err := s.getContainerFromHost(maxHost, appName, process)
	if err != nil {
		return "", err
	}
	return containerID, err
}
Example 6: Handler
func (b beanstalkdFactory) Handler(f func(*Message), names ...string) (Handler, error) {
	name := "default"
	if len(names) > 0 {
		name = names[0]
	}
	return &executor{
		inner: func() {
			if message, err := get(5e9, names...); err == nil {
				log.Debugf("Dispatching %q message to handler function.", message.Action)
				go func(m *Message) {
					f(m)
					if m.fail {
						q := beanstalkdQ{name: name}
						q.Put(m, 0)
					}
				}(message)
			} else {
				log.Debugf("Failed to get message from the queue: %s. Trying again...", err)
				if e, ok := err.(*net.OpError); ok && e.Op == "dial" {
					time.Sleep(5e9)
				}
			}
		},
	}, nil
}
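The bare numeric literals here are durations in nanoseconds: get(5e9, ...) waits up to five seconds for a message, and time.Sleep(5e9) backs off for five seconds after a failed dial, since time.Sleep takes a time.Duration counted in nanoseconds. Writing 5*time.Second would express the same value more readably.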
Example 7: ensureContainersStarted
func ensureContainersStarted(p DockerProvisioner, w io.Writer, relaunch bool, names []string, nodes ...cluster.Node) error {
	if w == nil {
		w = ioutil.Discard
	}
	var err error
	if len(names) == 0 {
		names, err = nodecontainer.AllNodeContainersNames()
		if err != nil {
			return err
		}
	}
	if len(nodes) == 0 {
		nodes, err = p.Cluster().UnfilteredNodes()
		if err != nil {
			return err
		}
	}
	errChan := make(chan error, len(nodes)*len(names))
	wg := sync.WaitGroup{}
	log.Debugf("[node containers] recreating %d containers", len(nodes)*len(names))
	recreateContainer := func(node *cluster.Node, confName string) {
		defer wg.Done()
		pool := node.Metadata["pool"]
		containerConfig, confErr := nodecontainer.LoadNodeContainer(pool, confName)
		if confErr != nil {
			errChan <- confErr
			return
		}
		if !containerConfig.Valid() {
			return
		}
		log.Debugf("[node containers] recreating container %q in %s [%s]", confName, node.Address, pool)
		fmt.Fprintf(w, "relaunching node container %q in the node %s [%s]\n", confName, node.Address, pool)
		confErr = create(containerConfig, node, pool, p, relaunch)
		if confErr != nil {
			confErr = errors.Wrapf(confErr, "[node containers] failed to create container in %s [%s]", node.Address, pool)
			errChan <- log.WrapError(confErr)
		}
	}
	for i := range nodes {
		wg.Add(1)
		go func(node *cluster.Node) {
			defer wg.Done()
			for j := range names {
				wg.Add(1)
				go recreateContainer(node, names[j])
			}
		}(&nodes[i])
	}
	wg.Wait()
	close(errChan)
	var allErrors []error
	for err = range errChan {
		allErrors = append(allErrors, err)
	}
	if len(allErrors) == 0 {
		return nil
	}
	return tsuruErrors.NewMultiError(allErrors...)
}
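The nested WaitGroup usage above is safe only because of ordering: each outer goroutine is registered with wg.Add(1) before it starts, and it registers every inner recreateContainer goroutine with another wg.Add(1) before its own deferred wg.Done() runs, so the counter can never reach zero while work is still being spawned.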
Example 8: Handler
func (factory redismqQFactory) Handler(f func(*Message), names ...string) (Handler, error) {
	name := "default"
	if len(names) > 0 {
		name = names[0]
	}
	consumerName := fmt.Sprintf("handler-%d", time.Now().UnixNano())
	queue, err := factory.get(name, consumerName)
	if err != nil {
		return nil, err
	}
	return &executor{
		inner: func() {
			if message, err := queue.Get(5e9); err == nil {
				log.Debugf("Dispatching %q message to handler function.", message.Action)
				go func(m *Message) {
					f(m)
					if m.fail {
						queue.Put(m, 0)
					}
				}(message)
			} else {
				log.Debugf("Failed to get message from the queue: %s. Trying again...", err)
				if e, ok := err.(*net.OpError); ok && e.Op == "dial" {
					time.Sleep(5e9)
				}
			}
		},
	}, nil
}
Example 9: Execute
// Execute executes the pipeline.
//
// The execution starts in the forward phase, calling the Forward function of
// all actions. If none of the Forward calls returns an error, the pipeline
// execution ends in the forward phase and is "committed".
//
// If any of the Forward calls fails, the executor switches to the backward
// phase (rollback) and calls the Backward function for each completed action.
// It does not call the Backward function of the action that failed.
//
// After rolling back all completed actions, it returns the original error
// returned by the action that failed.
func (p *Pipeline) Execute(params ...interface{}) error {
	var (
		r   Result
		err error
	)
	if len(p.actions) == 0 {
		return errors.New("No actions to execute.")
	}
	fwCtx := FWContext{Params: params}
	for i, a := range p.actions {
		log.Debugf("[pipeline] running the Forward for the %s action", a.Name)
		if a.Forward == nil {
			err = errors.New("All actions must define the forward function.")
		} else if len(fwCtx.Params) < a.MinParams {
			err = errors.New("Not enough parameters to call Action.Forward.")
		} else {
			r, err = a.Forward(fwCtx)
			a.rMutex.Lock()
			a.result = r
			a.rMutex.Unlock()
			fwCtx.Previous = r
		}
		if err != nil {
			log.Debugf("[pipeline] error running the Forward for the %s action - %s", a.Name, err)
			if a.OnError != nil {
				a.OnError(fwCtx, err)
			}
			p.rollback(i-1, params)
			return err
		}
	}
	return nil
}
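This version is functionally the same as Example 4 but constructs ad-hoc errors with errors.New inline, where Example 4 returns exported sentinel values (ErrPipelineNoActions, ErrPipelineForwardMissing, ErrPipelineFewParameters) that callers can compare against; the two snippets appear to be different revisions of the same function.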
Example 10: RemoveNamedContainers
func RemoveNamedContainers(p DockerProvisioner, w io.Writer, name string, pool string) error {
	var nodes []cluster.Node
	var err error
	if pool == "" {
		nodes, err = p.Cluster().UnfilteredNodes()
	} else {
		nodes, err = p.Cluster().UnfilteredNodesForMetadata(map[string]string{"pool": pool})
	}
	if err != nil {
		return errors.WithStack(err)
	}
	errChan := make(chan error, len(nodes))
	wg := sync.WaitGroup{}
	removeContainer := func(node *cluster.Node) {
		pool := node.Metadata["pool"]
		client, err := node.Client()
		if err != nil {
			errChan <- err
			return
		}
		err = client.StopContainer(name, 10)
		if err != nil {
			if _, ok := err.(*docker.NoSuchContainer); ok {
				log.Debugf("[node containers] no such container %q in %s [%s]", name, node.Address, pool)
				fmt.Fprintf(w, "no such node container %q in the node %s [%s]\n", name, node.Address, pool)
				return
			}
			if _, ok := err.(*docker.ContainerNotRunning); !ok {
				err = errors.Wrapf(err, "[node containers] failed to stop container in %s [%s]", node.Address, pool)
				errChan <- err
				return
			}
		}
		log.Debugf("[node containers] removing container %q in %s [%s]", name, node.Address, pool)
		fmt.Fprintf(w, "removing node container %q in the node %s [%s]\n", name, node.Address, pool)
		err = client.RemoveContainer(docker.RemoveContainerOptions{ID: name, Force: true})
		if err != nil {
			err = errors.Wrapf(err, "[node containers] failed to remove container in %s [%s]", node.Address, pool)
			errChan <- err
		}
	}
	for i := range nodes {
		wg.Add(1)
		go func(node *cluster.Node) {
			removeContainer(node)
			wg.Done()
		}(&nodes[i])
	}
	wg.Wait()
	close(errChan)
	var allErrors []error
	for err := range errChan {
		allErrors = append(allErrors, err)
	}
	if len(allErrors) == 0 {
		return nil
	}
	return tsuruErrors.NewMultiError(allErrors...)
}
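Note how the two go-dockerclient error types are treated differently in removeContainer: *docker.NoSuchContainer means there is nothing to remove on that node, so it is logged and skipped, while *docker.ContainerNotRunning from StopContainer is tolerated so the removal (with Force: true) can still proceed; any other stop error aborts removal for that node.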
Example 11: ensureContainersStarted
func ensureContainersStarted(p DockerProvisioner, w io.Writer, relaunch bool, nodes ...cluster.Node) error {
	if w == nil {
		w = ioutil.Discard
	}
	confNames, err := scopedconfig.FindAllScopedConfigNames(nodeContainerCollection)
	if err != nil {
		return err
	}
	if len(nodes) == 0 {
		nodes, err = p.Cluster().UnfilteredNodes()
		if err != nil {
			return err
		}
	}
	errChan := make(chan error, len(nodes))
	wg := sync.WaitGroup{}
	log.Debugf("[node containers] recreating %d containers", len(nodes))
	recreateContainer := func(node *cluster.Node, confName string) {
		defer wg.Done()
		pool := node.Metadata["pool"]
		containerConfig, confErr := LoadNodeContainer(pool, confName)
		if confErr != nil {
			errChan <- confErr
			return
		}
		log.Debugf("[node containers] recreating container %q in %s [%s]", confName, node.Address, pool)
		fmt.Fprintf(w, "relaunching node container %q in the node %s [%s]\n", confName, node.Address, pool)
		confErr = containerConfig.create(node.Address, pool, p, relaunch)
		if confErr != nil {
			msg := fmt.Sprintf("[node containers] failed to create container in %s [%s]: %s", node.Address, pool, confErr)
			log.Error(msg)
			errChan <- errors.New(msg)
		}
	}
	for i := range nodes {
		wg.Add(1)
		go func(node *cluster.Node) {
			defer wg.Done()
			for j := range confNames {
				wg.Add(1)
				go recreateContainer(node, confNames[j])
			}
		}(&nodes[i])
	}
	wg.Wait()
	close(errChan)
	var allErrors []string
	for err = range errChan {
		allErrors = append(allErrors, err.Error())
	}
	if len(allErrors) == 0 {
		return nil
	}
	return fmt.Errorf("multiple errors: %s", strings.Join(allErrors, ", "))
}
Example 12: callback
func (s *SAMLAuthScheme) callback(params map[string]string) error {
	xml, ok := params["xml"]
	if !ok {
		return ErrMissingFormValueError
	}
	log.Debugf("Data received from identity provider: %s", xml)
	response, err := s.Parser.Parse(xml)
	if err != nil {
		log.Errorf("Got error while parsing IDP data: %s", err)
		return ErrParseResponseError
	}
	sp, err := s.createSP()
	if err != nil {
		return err
	}
	err = validateResponse(response, sp)
	if err != nil {
		log.Errorf("Got error while validating IDP data: %s", err)
		if strings.Contains(err.Error(), "assertion has expired") {
			return ErrRequestNotFound
		}
		return ErrParseResponseError
	}
	requestId, err := getRequestIdFromResponse(response)
	if requestId == "" && err == ErrRequestIdNotFound {
		log.Debugf("Request ID %s not found: %s", requestId, err.Error())
		return err
	}
	req := request{}
	err = req.getById(requestId)
	if err != nil {
		return err
	}
	email, err := getUserIdentity(response)
	if err != nil {
		return err
	}
	if !validation.ValidateEmail(email) {
		if strings.Contains(email, "@") {
			return &tsuruErrors.ValidationError{Message: "attribute user identity contains invalid character"}
		}
		// we need to create a unique email for the user
		email = strings.Join([]string{email, "@", s.idpHost()}, "")
		if !validation.ValidateEmail(email) {
			return &tsuruErrors.ValidationError{Message: "could not create valid email with auth:saml:idp-attribute-user-identity"}
		}
	}
	req.Authed = true
	req.Email = email
	req.Update()
	return nil
}
Example 13: commit
// commit commits a Docker image based on the container
// and returns the image repository.
func (c *container) commit() (string, error) {
	log.Debugf("committing container %s", c.ID)
	repository := assembleImageName(c.AppName)
	opts := docker.CommitContainerOptions{Container: c.ID, Repository: repository}
	image, err := dockerCluster().CommitContainer(opts)
	if err != nil {
		log.Errorf("Could not commit docker image: %s", err)
		return "", err
	}
	log.Debugf("image %s generated from container %s", image.ID, c.ID)
	pushImage(repository)
	return repository, nil
}
Example 14: HandleError
func (h *NodeHealer) HandleError(node *cluster.Node) time.Duration {
	h.Lock()
	if h.locks[node.Address] == nil {
		h.locks[node.Address] = &sync.Mutex{}
	}
	h.Unlock()
	h.locks[node.Address].Lock()
	defer h.locks[node.Address].Unlock()
	failures := node.FailureCount()
	if failures < h.failuresBeforeHealing {
		log.Debugf("%d failures detected in node %q, waiting for more failures before healing.", failures, node.Address)
		return h.disabledTime
	}
	if !node.HasSuccess() {
		log.Debugf("Node %q has never been successfully reached, healing won't run on it.", node.Address)
		return h.disabledTime
	}
	_, hasIaas := node.Metadata["iaas"]
	if !hasIaas {
		log.Debugf("Node %q doesn't have IaaS information, healing won't run on it.", node.Address)
		return h.disabledTime
	}
	healingCounter, err := healingCountFor("node", node.Address, consecutiveHealingsTimeframe)
	if err != nil {
		log.Errorf("Node healing: couldn't verify number of previous healings for %s: %s", node.Address, err.Error())
		return h.disabledTime
	}
	if healingCounter > consecutiveHealingsLimitInTimeframe {
		log.Errorf("Node healing: number of healings for node %s in the last %d minutes exceeds limit of %d: %d",
			node.Address, consecutiveHealingsTimeframe/time.Minute, consecutiveHealingsLimitInTimeframe, healingCounter)
		return h.disabledTime
	}
	log.Errorf("Initiating healing process for node %q after %d failures.", node.Address, failures)
	evt, err := NewHealingEvent(*node)
	if err != nil {
		log.Errorf("Error trying to insert healing event: %s", err.Error())
		return h.disabledTime
	}
	createdNode, err := h.healNode(node)
	if err != nil {
		log.Errorf("Error healing: %s", err.Error())
	}
	err = evt.Update(createdNode, err)
	if err != nil {
		log.Errorf("Error trying to update healing event: %s", err.Error())
	}
	if createdNode.Address != "" {
		return 0
	}
	return h.disabledTime
}
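The locks map above implements a per-node mutex: the healer briefly takes its own global lock only to lazily create the mutex for a node's address, then serializes healing per node, so healing one node never blocks error handling for any other node.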
Example 15: Heal
// Heal iterates through all Juju machines, checking whether the
// juju-machine-agent is down, and heals those machines.
func (h instanceMachineHealer) Heal() error {
	p := JujuProvisioner{}
	output, _ := p.getOutput()
	for _, machine := range output.Machines {
		if machine.AgentState == "down" {
			log.Debugf("Healing juju-machine-agent in machine %s", machine.InstanceID)
			upStartCmd("stop", "juju-machine-agent", machine.IPAddress)
			upStartCmd("start", "juju-machine-agent", machine.IPAddress)
		} else {
			log.Debugf("juju-machine-agent for machine %s needs no cure, skipping...", machine.InstanceID)
		}
	}
	return nil
}