本文整理匯總了Golang中github.com/docker/swarmkit/api.Task.DesiredState方法的典型用法代碼示例。如果您正苦於以下問題:Golang Task.DesiredState方法的具體用法?Golang Task.DesiredState怎麼用?Golang Task.DesiredState使用的例子?那麼, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類github.com/docker/swarmkit/api.Task
的用法示例。
在下文中一共展示了Task.DesiredState方法的3個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Golang代碼示例。
示例1: removeTask
// removeTask marks an existing task for shutdown by setting its
// DesiredState to TaskStateShutdown inside the supplied store batch.
// If the task has already been deleted from the store, it does nothing.
// TODO(aaronl): optimistic update?
func (g *GlobalOrchestrator) removeTask(ctx context.Context, batch *store.Batch, t *api.Task) {
	// Capture the ID up front: the closure below reassigns t, and sets it
	// to nil when the task no longer exists. If batch.Update then returns
	// an error, logging t.ID would dereference a nil pointer.
	taskID := t.ID
	err := batch.Update(func(tx store.Tx) error {
		// Re-read the task inside the transaction so we operate on the
		// latest version; it may have been deleted concurrently.
		t = store.GetTask(tx, taskID)
		if t != nil {
			t.DesiredState = api.TaskStateShutdown
			return store.UpdateTask(tx, t)
		}
		return nil
	})
	if err != nil {
		log.G(ctx).WithError(err).Errorf("global orchestrator: removeTask failed to remove %s", taskID)
	}
}
示例2: Restart
// Restart initiates a new task to replace t if appropriate under the service's
// restart policy.
// Restart initiates a new task to replace t if appropriate under the service's
// restart policy.
//
// It sets the old task's desired state to TaskStateShutdown and, when the
// restart policy permits, creates a replacement task in the same transaction,
// handing it to DelayStart to be promoted to running after the restart delay.
// Note that t is passed by value, so the caller's copy is never mutated.
func (r *Supervisor) Restart(ctx context.Context, tx store.Tx, cluster *api.Cluster, service *api.Service, t api.Task) error {
// TODO(aluzzardi): This function should not depend on `service`.
// Is the old task still in the process of restarting? If so, wait for
// its restart delay to elapse, to avoid tight restart loops (for
// example, when the image doesn't exist).
r.mu.Lock()
oldDelay, ok := r.delays[t.ID]
if ok {
if !oldDelay.waiter {
// Spawn at most one waiter goroutine per delayed task.
oldDelay.waiter = true
go r.waitRestart(ctx, oldDelay, cluster, t.ID)
}
r.mu.Unlock()
return nil
}
r.mu.Unlock()
// Sanity check: was the task shut down already by a separate call to
// Restart? If so, we must avoid restarting it, because this will create
// an extra task. This should never happen unless there is a bug.
if t.DesiredState > api.TaskStateRunning {
return errors.New("Restart called on task that was already shut down")
}
// Mark the old task for shutdown; the agent stops it asynchronously.
t.DesiredState = api.TaskStateShutdown
err := store.UpdateTask(tx, &t)
if err != nil {
log.G(ctx).WithError(err).Errorf("failed to set task desired state to dead")
return err
}
// The restart policy may forbid a replacement; in that case the old
// task is only shut down.
if !r.shouldRestart(ctx, &t, service) {
return nil
}
var restartTask *api.Task
if orchestrator.IsReplicatedService(service) {
// Replicated service: the replacement reuses the old task's slot.
restartTask = orchestrator.NewTask(cluster, service, t.Slot, "")
} else if orchestrator.IsGlobalService(service) {
// Global service: the replacement is pinned to the same node.
restartTask = orchestrator.NewTask(cluster, service, 0, t.NodeID)
} else {
log.G(ctx).Error("service not supported by restart supervisor")
return nil
}
n := store.GetNode(tx, t.NodeID)
// Hold the new task at READY; DelayStart advances it to RUNNING later.
restartTask.DesiredState = api.TaskStateReady
var restartDelay time.Duration
// Restart delay is not applied to drained nodes
if n == nil || n.Spec.Availability != api.NodeAvailabilityDrain {
if t.Spec.Restart != nil && t.Spec.Restart.Delay != nil {
var err error
restartDelay, err = ptypes.Duration(t.Spec.Restart.Delay)
if err != nil {
log.G(ctx).WithError(err).Error("invalid restart delay; using default")
restartDelay = orchestrator.DefaultRestartDelay
}
} else {
restartDelay = orchestrator.DefaultRestartDelay
}
}
waitStop := true
// Normally we wait for the old task to stop running, but we skip this
// if the old task is already dead or the node it's assigned to is down.
if (n != nil && n.Status.State == api.NodeStatus_DOWN) || t.Status.State > api.TaskStateRunning {
waitStop = false
}
if err := store.CreateTask(tx, restartTask); err != nil {
log.G(ctx).WithError(err).WithField("task.id", restartTask.ID).Error("task create failed")
return err
}
r.recordRestartHistory(restartTask)
r.DelayStart(ctx, tx, &t, restartTask.ID, restartDelay, waitStop)
return nil
}
示例3: Restart
// Restart initiates a new task to replace t if appropriate under the service's
// restart policy.
// Restart initiates a new task to replace t if appropriate under the service's
// restart policy.
//
// NOTE(review): this appears to be an older variant of Supervisor.Restart —
// it lacks the in-progress-restart and already-shut-down sanity checks and
// holds the new task at ACCEPTED rather than READY; confirm which version the
// codebase should use.
func (r *RestartSupervisor) Restart(ctx context.Context, tx store.Tx, service *api.Service, t api.Task) error {
// TODO(aluzzardi): This function should not depend on `service`.
// Mark the old task for shutdown; the agent stops it asynchronously.
// t is passed by value, so the caller's copy is not mutated.
t.DesiredState = api.TaskStateShutdown
err := store.UpdateTask(tx, &t)
if err != nil {
log.G(ctx).WithError(err).Errorf("failed to set task desired state to dead")
return err
}
// The restart policy may forbid a replacement; in that case the old
// task is only shut down.
if !r.shouldRestart(ctx, &t, service) {
return nil
}
var restartTask *api.Task
if isReplicatedService(service) {
// Replicated service: the replacement reuses the old task's slot.
restartTask = newTask(service, t.Slot)
} else if isGlobalService(service) {
// Global service: the replacement is pinned to the same node.
restartTask = newTask(service, 0)
restartTask.NodeID = t.NodeID
} else {
log.G(ctx).Error("service not supported by restart supervisor")
return nil
}
n := store.GetNode(tx, t.NodeID)
// Hold the new task at ACCEPTED; DelayStart advances it later.
restartTask.DesiredState = api.TaskStateAccepted
var restartDelay time.Duration
// Restart delay is not applied to drained nodes
if n == nil || n.Spec.Availability != api.NodeAvailabilityDrain {
if t.Spec.Restart != nil && t.Spec.Restart.Delay != nil {
var err error
restartDelay, err = ptypes.Duration(t.Spec.Restart.Delay)
if err != nil {
log.G(ctx).WithError(err).Error("invalid restart delay; using default")
restartDelay = defaultRestartDelay
}
} else {
restartDelay = defaultRestartDelay
}
}
waitStop := true
// Normally we wait for the old task to stop running, but we skip this
// if the old task is already dead or the node it's assigned to is down.
if (n != nil && n.Status.State == api.NodeStatus_DOWN) || t.Status.State > api.TaskStateRunning {
waitStop = false
}
if err := store.CreateTask(tx, restartTask); err != nil {
log.G(ctx).WithError(err).WithField("task.id", restartTask.ID).Error("task create failed")
return err
}
r.recordRestartHistory(restartTask)
r.DelayStart(ctx, tx, &t, restartTask.ID, restartDelay, waitStop)
return nil
}