This article collects typical usage examples of the UpdateTask function from the Golang package github.com/docker/swarmkit/manager/state/store. If you are struggling with questions such as: what exactly does UpdateTask do? How is UpdateTask used? What does UpdateTask look like in real code? — then the curated function examples here may help.
Fifteen code examples of the UpdateTask function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Golang code examples.
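Before diving in, here is a minimal, self-contained sketch of the pattern almost every example below follows: open a write transaction, re-read the task through that transaction, mutate the fresh copy, and persist it with store.UpdateTask. This is not taken from any one example; the store setup, task ID, and states are assumptions chosen for illustration, using only calls that appear in the examples on this page.

package main

import (
    "fmt"
    "log"

    "github.com/docker/swarmkit/api"
    "github.com/docker/swarmkit/manager/state/store"
)

func main() {
    s := store.NewMemoryStore(nil)
    defer s.Close()

    // Seed a task so there is something to update. The ID and states
    // here are made up for illustration.
    err := s.Update(func(tx store.Tx) error {
        return store.CreateTask(tx, &api.Task{
            ID:           "task1",
            DesiredState: api.TaskStateReady,
        })
    })
    if err != nil {
        log.Fatal(err)
    }

    // The canonical UpdateTask pattern: re-read the task inside the
    // same transaction that writes it, mutate the fresh copy, persist.
    err = s.Update(func(tx store.Tx) error {
        t := store.GetTask(tx, "task1")
        if t == nil {
            return fmt.Errorf("task task1 not found")
        }
        t.DesiredState = api.TaskStateRunning
        return store.UpdateTask(tx, t)
    })
    if err != nil {
        log.Fatal(err)
    }
}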
Example 1: useExistingTask
func (u *Updater) useExistingTask(ctx context.Context, slot slot, existing *api.Task) {
    var removeTasks []*api.Task
    for _, t := range slot {
        if t != existing {
            removeTasks = append(removeTasks, t)
        }
    }
    if len(removeTasks) != 0 || existing.DesiredState != api.TaskStateRunning {
        _, err := u.store.Batch(func(batch *store.Batch) error {
            u.removeOldTasks(ctx, batch, removeTasks)
            if existing.DesiredState != api.TaskStateRunning {
                err := batch.Update(func(tx store.Tx) error {
                    t := store.GetTask(tx, existing.ID)
                    if t == nil {
                        return fmt.Errorf("task %s not found while trying to start it", existing.ID)
                    }
                    if t.DesiredState >= api.TaskStateRunning {
                        return fmt.Errorf("task %s was already started when reached by updater", existing.ID)
                    }
                    t.DesiredState = api.TaskStateRunning
                    return store.UpdateTask(tx, t)
                })
                if err != nil {
                    log.G(ctx).WithError(err).Errorf("starting task %s failed", existing.ID)
                }
            }
            return nil
        })
        if err != nil {
            log.G(ctx).WithError(err).Error("updater batch transaction failed")
        }
    }
}
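A note on the store.Batch pattern used here and in many of the examples below: judging from these snippets, Batch lets the caller enqueue many small batch.Update closures that the store commits in chunks rather than as one large transaction, and it returns how many updates were actually committed (Example 13 relies on that count). An error from one closure is logged and handled locally, so a single failed task does not abort the rest of the batch.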
Example 2: removeOldTasks
// removeOldTasks shuts down the given tasks and returns one of the tasks that
// was shut down, or an error.
func (u *Updater) removeOldTasks(ctx context.Context, batch *store.Batch, removeTasks []*api.Task) (*api.Task, error) {
    var (
        lastErr     error
        removedTask *api.Task
    )
    for _, original := range removeTasks {
        err := batch.Update(func(tx store.Tx) error {
            t := store.GetTask(tx, original.ID)
            if t == nil {
                return fmt.Errorf("task %s not found while trying to shut it down", original.ID)
            }
            if t.DesiredState > api.TaskStateRunning {
                return fmt.Errorf("task %s was already shut down when reached by updater", original.ID)
            }
            t.DesiredState = api.TaskStateShutdown
            return store.UpdateTask(tx, t)
        })
        if err != nil {
            lastErr = err
        } else {
            removedTask = original
        }
    }
    if removedTask == nil {
        return nil, lastErr
    }
    return removedTask, nil
}
Example 3: StartNow
// StartNow moves the task into the RUNNING state so it will proceed to start
// up.
func (r *Supervisor) StartNow(tx store.Tx, taskID string) error {
    t := store.GetTask(tx, taskID)
    if t == nil || t.DesiredState >= api.TaskStateRunning {
        return nil
    }
    t.DesiredState = api.TaskStateRunning
    return store.UpdateTask(tx, t)
}
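Because StartNow takes an open store.Tx instead of the store itself, the caller controls the transaction boundary. A minimal invocation sketch, assuming a Supervisor r and a store s are already in scope (both names, and the task ID, are placeholders rather than part of the example above):

err := s.Update(func(tx store.Tx) error {
    // Runs atomically with anything else this transaction does;
    // a no-op if the task is gone or already at or past RUNNING.
    return r.StartNow(tx, "some-task-id") // hypothetical task ID
})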
Example 4: commitAllocatedTask
func (a *Allocator) commitAllocatedTask(ctx context.Context, batch *store.Batch, t *api.Task) error {
    return batch.Update(func(tx store.Tx) error {
        err := store.UpdateTask(tx, t)
        if err == store.ErrSequenceConflict {
            storeTask := store.GetTask(tx, t.ID)
            taskUpdateNetworks(storeTask, t.Networks)
            taskUpdateEndpoint(storeTask, t.Endpoint)
            if storeTask.Status.State < api.TaskStatePending {
                storeTask.Status = t.Status
            }
            err = store.UpdateTask(tx, storeTask)
        }
        return errors.Wrapf(err, "failed updating state in store transaction for task %s", t.ID)
    })
}
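The store.ErrSequenceConflict branch exists because t was read in some earlier transaction and its object version can be stale by the time this batch commits; the code then replays its changes onto a fresh copy. A sketch of how such a conflict can be provoked, reusing the in-memory store s and task "task1" from the sketch at the top of this page (the outcome is inferred from this example, not verified against a particular swarmkit version):

var copy1, copy2 *api.Task
s.View(func(tx store.ReadTx) { copy1 = store.GetTask(tx, "task1") })
s.View(func(tx store.ReadTx) { copy2 = store.GetTask(tx, "task1") })

// The first write succeeds and bumps the stored object version.
_ = s.Update(func(tx store.Tx) error { return store.UpdateTask(tx, copy1) })

// The second write carries the now-stale copy and should fail with
// store.ErrSequenceConflict, the case commitAllocatedTask recovers from.
err := s.Update(func(tx store.Tx) error { return store.UpdateTask(tx, copy2) })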
Example 5: updateTask
func (u *Updater) updateTask(ctx context.Context, original, updated *api.Task) error {
    log.G(ctx).Debugf("replacing %s with %s", original.ID, updated.ID)

    // Kick off the watch before even creating the updated task. This is in
    // order to avoid missing any event.
    taskUpdates, cancel := state.Watch(u.watchQueue, state.EventUpdateTask{
        Task:   &api.Task{ID: updated.ID},
        Checks: []state.TaskCheckFunc{state.TaskCheckID},
    })
    defer cancel()

    var delayStartCh <-chan struct{}
    // Atomically create the updated task and bring down the old one.
    err := u.store.Update(func(tx store.Tx) error {
        t := store.GetTask(tx, original.ID)
        if t == nil {
            return fmt.Errorf("task %s not found while trying to update it", original.ID)
        }
        if t.DesiredState > api.TaskStateRunning {
            return fmt.Errorf("task %s was already shut down when reached by updater", original.ID)
        }
        t.DesiredState = api.TaskStateShutdown
        if err := store.UpdateTask(tx, t); err != nil {
            return err
        }
        if err := store.CreateTask(tx, updated); err != nil {
            return err
        }
        // Wait for the old task to stop or time out, and then set the new one
        // to RUNNING.
        delayStartCh = u.restarts.DelayStart(ctx, tx, original, updated.ID, 0, true)
        return nil
    })
    if err != nil {
        return err
    }

    <-delayStartCh

    // Wait for the new task to come up.
    // TODO(aluzzardi): Consider adding a timeout here.
    for {
        select {
        case e := <-taskUpdates:
            updated = e.(state.EventUpdateTask).Task
            if updated.Status.State >= api.TaskStateRunning {
                return nil
            }
        case <-u.stopChan:
            return nil
        }
    }
}
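A design note on this example: the subscription via state.Watch is established before the transaction that shuts down the old task and creates the replacement, and only cancelled on return. Subscribing first means the loop at the bottom cannot miss the EventUpdateTask that reports the new task reaching RUNNING; subscribing after the create would leave a window in which that event could be dropped.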
Example 6: processTaskUpdates
func (d *Dispatcher) processTaskUpdates() {
    d.taskUpdatesLock.Lock()
    if len(d.taskUpdates) == 0 {
        d.taskUpdatesLock.Unlock()
        return
    }
    taskUpdates := d.taskUpdates
    d.taskUpdates = make(map[string]*api.TaskStatus)
    d.taskUpdatesLock.Unlock()

    log := log.G(d.ctx).WithFields(logrus.Fields{
        "method": "(*Dispatcher).processTaskUpdates",
    })

    _, err := d.store.Batch(func(batch *store.Batch) error {
        for taskID, status := range taskUpdates {
            err := batch.Update(func(tx store.Tx) error {
                logger := log.WithField("task.id", taskID)
                task := store.GetTask(tx, taskID)
                if task == nil {
                    logger.Errorf("task unavailable")
                    return nil
                }
                logger = logger.WithField("state.transition", fmt.Sprintf("%v->%v", task.Status.State, status.State))
                if task.Status == *status {
                    logger.Debug("task status identical, ignoring")
                    return nil
                }
                if task.Status.State > status.State {
                    logger.Debug("task status invalid transition")
                    return nil
                }
                task.Status = *status
                if err := store.UpdateTask(tx, task); err != nil {
                    logger.WithError(err).Error("failed to update task status")
                    return nil
                }
                logger.Debug("task status updated")
                return nil
            })
            if err != nil {
                log.WithError(err).Error("dispatcher transaction failed")
            }
        }
        return nil
    })
    if err != nil {
        log.WithError(err).Error("dispatcher batch failed")
    }
}
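Two details worth calling out in this example. The guard task.Status.State > status.State relies on the api.TaskState constants forming an ordered progression, the same assumption behind the < and >= comparisons in Examples 10, 14, and 15, so stale or out-of-order status reports can simply be dropped. And each closure deliberately returns nil even on failure, so that one bad status report cannot abort the rest of the batch.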
Example 7: removeTask
func (g *GlobalOrchestrator) removeTask(ctx context.Context, batch *store.Batch, t *api.Task) {
    // set existing task DesiredState to TaskStateShutdown
    // TODO(aaronl): optimistic update?
    err := batch.Update(func(tx store.Tx) error {
        t = store.GetTask(tx, t.ID)
        if t != nil {
            t.DesiredState = api.TaskStateShutdown
            return store.UpdateTask(tx, t)
        }
        return nil
    })
    if err != nil {
        log.G(ctx).WithError(err).Errorf("global orchestrator: removeTask failed to remove %s", t.ID)
    }
}
Example 8: removeTasks
func (r *ReplicatedOrchestrator) removeTasks(ctx context.Context, batch *store.Batch, service *api.Service, tasks []*api.Task) {
    for _, t := range tasks {
        err := batch.Update(func(tx store.Tx) error {
            // TODO(aaronl): optimistic update?
            t = store.GetTask(tx, t.ID)
            if t != nil {
                t.DesiredState = api.TaskStateShutdown
                return store.UpdateTask(tx, t)
            }
            return nil
        })
        if err != nil {
            log.G(ctx).WithError(err).Errorf("removing task %s failed", t.ID)
        }
    }
}
Example 9: removeOldTasks
func (u *Updater) removeOldTasks(ctx context.Context, batch *store.Batch, removeTasks []*api.Task) {
    for _, original := range removeTasks {
        err := batch.Update(func(tx store.Tx) error {
            t := store.GetTask(tx, original.ID)
            if t == nil {
                return fmt.Errorf("task %s not found while trying to shut it down", original.ID)
            }
            if t.DesiredState > api.TaskStateRunning {
                return fmt.Errorf("task %s was already shut down when reached by updater", original.ID)
            }
            t.DesiredState = api.TaskStateShutdown
            return store.UpdateTask(tx, t)
        })
        if err != nil {
            log.G(ctx).WithError(err).Errorf("shutting down stale task %s failed", original.ID)
        }
    }
}
Example 10: moveTasksToOrphaned
func (d *Dispatcher) moveTasksToOrphaned(nodeID string) error {
    _, err := d.store.Batch(func(batch *store.Batch) error {
        var (
            tasks []*api.Task
            err   error
        )
        d.store.View(func(tx store.ReadTx) {
            tasks, err = store.FindTasks(tx, store.ByNodeID(nodeID))
        })
        if err != nil {
            return err
        }
        for _, task := range tasks {
            if task.Status.State < api.TaskStateOrphaned {
                task.Status.State = api.TaskStateOrphaned
            }
            if err := batch.Update(func(tx store.Tx) error {
                err := store.UpdateTask(tx, task)
                if err != nil {
                    return err
                }
                return nil
            }); err != nil {
                return err
            }
        }
        return nil
    })
    return err
}
Example 11: TestUpdaterRollback
func TestUpdaterRollback(t *testing.T) {
    ctx := context.Background()
    s := store.NewMemoryStore(nil)
    assert.NotNil(t, s)
    defer s.Close()

    orchestrator := NewReplicatedOrchestrator(s)
    defer orchestrator.Stop()

    var (
        failImage1 uint32
        failImage2 uint32
    )

    watchCreate, cancelCreate := state.Watch(s.WatchQueue(), state.EventCreateTask{})
    defer cancelCreate()

    watchServiceUpdate, cancelServiceUpdate := state.Watch(s.WatchQueue(), state.EventUpdateService{})
    defer cancelServiceUpdate()

    // Fail new tasks the updater tries to run
    watchUpdate, cancelUpdate := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
    defer cancelUpdate()
    go func() {
        failedLast := false
        for {
            select {
            case e := <-watchUpdate:
                task := e.(state.EventUpdateTask).Task
                if task.DesiredState == task.Status.State {
                    continue
                }
                if task.DesiredState == api.TaskStateRunning && task.Status.State != api.TaskStateFailed && task.Status.State != api.TaskStateRunning {
                    err := s.Update(func(tx store.Tx) error {
                        task = store.GetTask(tx, task.ID)
                        // Never fail two image2 tasks in a row, so there's a
                        // mix of failed and successful tasks for the rollback.
                        if task.Spec.GetContainer().Image == "image1" && atomic.LoadUint32(&failImage1) == 1 {
                            task.Status.State = api.TaskStateFailed
                            failedLast = true
                        } else if task.Spec.GetContainer().Image == "image2" && atomic.LoadUint32(&failImage2) == 1 && !failedLast {
                            task.Status.State = api.TaskStateFailed
                            failedLast = true
                        } else {
                            task.Status.State = task.DesiredState
                            failedLast = false
                        }
                        return store.UpdateTask(tx, task)
                    })
                    assert.NoError(t, err)
                } else if task.DesiredState > api.TaskStateRunning {
                    err := s.Update(func(tx store.Tx) error {
                        task = store.GetTask(tx, task.ID)
                        task.Status.State = task.DesiredState
                        return store.UpdateTask(tx, task)
                    })
                    assert.NoError(t, err)
                }
            }
        }
    }()

    // Create a service with four replicas specified before the orchestrator
    // is started. This should result in four tasks when the orchestrator
    // starts up.
    err := s.Update(func(tx store.Tx) error {
        s1 := &api.Service{
            ID: "id1",
            Spec: api.ServiceSpec{
                Annotations: api.Annotations{
                    Name: "name1",
                },
                Task: api.TaskSpec{
                    Runtime: &api.TaskSpec_Container{
                        Container: &api.ContainerSpec{
                            Image: "image1",
                        },
                    },
                    Restart: &api.RestartPolicy{
                        Condition: api.RestartOnNone,
                    },
                },
                Mode: &api.ServiceSpec_Replicated{
                    Replicated: &api.ReplicatedService{
                        Replicas: 4,
                    },
                },
                Update: &api.UpdateConfig{
                    FailureAction:   api.UpdateConfig_ROLLBACK,
                    Parallelism:     1,
                    Delay:           *ptypes.DurationProto(10 * time.Millisecond),
                    Monitor:         ptypes.DurationProto(500 * time.Millisecond),
                    MaxFailureRatio: 0.4,
                },
            },
        }
        assert.NoError(t, store.CreateService(tx, s1))
        return nil
    })
//......... the rest of this example is omitted .........
Example 12: TestOrchestratorRestartDelay
func TestOrchestratorRestartDelay(t *testing.T) {
    ctx := context.Background()
    s := store.NewMemoryStore(nil)
    assert.NotNil(t, s)

    orchestrator := NewReplicatedOrchestrator(s)
    defer orchestrator.Stop()

    watch, cancel := state.Watch(s.WatchQueue() /*state.EventCreateTask{}, state.EventUpdateTask{}*/)
    defer cancel()

    // Create a service with two instances specified before the orchestrator is
    // started. This should result in two tasks when the orchestrator
    // starts up.
    err := s.Update(func(tx store.Tx) error {
        j1 := &api.Service{
            ID: "id1",
            Spec: api.ServiceSpec{
                Annotations: api.Annotations{
                    Name: "name1",
                },
                Task: api.TaskSpec{
                    Runtime: &api.TaskSpec_Container{
                        Container: &api.ContainerSpec{},
                    },
                    Restart: &api.RestartPolicy{
                        Condition: api.RestartOnAny,
                        Delay:     ptypes.DurationProto(100 * time.Millisecond),
                    },
                },
                Mode: &api.ServiceSpec_Replicated{
                    Replicated: &api.ReplicatedService{
                        Replicas: 2,
                    },
                },
            },
        }
        assert.NoError(t, store.CreateService(tx, j1))
        return nil
    })
    assert.NoError(t, err)

    // Start the orchestrator.
    go func() {
        assert.NoError(t, orchestrator.Run(ctx))
    }()

    observedTask1 := watchTaskCreate(t, watch)
    assert.Equal(t, observedTask1.Status.State, api.TaskStateNew)
    assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1")

    observedTask2 := watchTaskCreate(t, watch)
    assert.Equal(t, observedTask2.Status.State, api.TaskStateNew)
    assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1")

    // Fail the first task. Confirm that it gets restarted.
    updatedTask1 := observedTask1.Copy()
    updatedTask1.Status = api.TaskStatus{State: api.TaskStateFailed}
    before := time.Now()
    err = s.Update(func(tx store.Tx) error {
        assert.NoError(t, store.UpdateTask(tx, updatedTask1))
        return nil
    })
    assert.NoError(t, err)

    expectCommit(t, watch)
    expectTaskUpdate(t, watch)
    expectCommit(t, watch)
    expectTaskUpdate(t, watch)
    observedTask3 := watchTaskCreate(t, watch)
    expectCommit(t, watch)
    assert.Equal(t, observedTask3.Status.State, api.TaskStateNew)
    assert.Equal(t, observedTask3.DesiredState, api.TaskStateReady)
    assert.Equal(t, observedTask3.ServiceAnnotations.Name, "name1")

    observedTask4 := watchTaskUpdate(t, watch)
    after := time.Now()

    // At least 100 ms should have elapsed. Only check the lower bound,
    // because the system may be slow and it could have taken longer.
    if after.Sub(before) < 100*time.Millisecond {
        t.Fatalf("restart delay should have elapsed. Got: %v", after.Sub(before))
    }

    assert.Equal(t, observedTask4.Status.State, api.TaskStateNew)
    assert.Equal(t, observedTask4.DesiredState, api.TaskStateRunning)
    assert.Equal(t, observedTask4.ServiceAnnotations.Name, "name1")
}
Example 13: procUnallocatedTasksNetwork
func (a *Allocator) procUnallocatedTasksNetwork(ctx context.Context, nc *networkContext) {
    tasks := make([]*api.Task, 0, len(nc.unallocatedTasks))

    committed, err := a.store.Batch(func(batch *store.Batch) error {
        for _, t := range nc.unallocatedTasks {
            var allocatedT *api.Task
            err := batch.Update(func(tx store.Tx) error {
                var err error
                allocatedT, err = a.allocateTask(ctx, nc, tx, t)
                return err
            })
            if err != nil {
                log.G(ctx).WithError(err).Error("task allocation failure")
                continue
            }
            tasks = append(tasks, allocatedT)
        }
        return nil
    })
    if err != nil {
        log.G(ctx).WithError(err).Error("failed a store batch operation while processing unallocated tasks")
    }

    var retryCnt int
    for len(tasks) != 0 {
        var err error

        for _, t := range tasks[:committed] {
            delete(nc.unallocatedTasks, t.ID)
        }

        tasks = tasks[committed:]
        if len(tasks) == 0 {
            break
        }

        updatedTasks := make([]*api.Task, 0, len(tasks))
        committed, err = a.store.Batch(func(batch *store.Batch) error {
            for _, t := range tasks {
                err := batch.Update(func(tx store.Tx) error {
                    return store.UpdateTask(tx, t)
                })
                if err != nil {
                    log.G(ctx).WithError(err).Error("allocated task store update failure")
                    continue
                }
                updatedTasks = append(updatedTasks, t)
            }
            return nil
        })
        if err != nil {
            log.G(ctx).WithError(err).Error("failed a store batch operation while processing unallocated tasks")
        }

        tasks = updatedTasks

        select {
        case <-ctx.Done():
            return
        default:
        }

        retryCnt++
        if retryCnt >= 3 {
            log.G(ctx).Errorf("failed to complete batch update of allocated tasks after 3 retries")
            break
        }
    }
}
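This example is also the clearest illustration of Batch's return value: committed is the number of enqueued updates that actually made it into the store. Tasks in tasks[:committed] are removed from the unallocated set, and the remainder are re-submitted, with the loop giving up after three retries or when the context is done. The exact commit semantics of Batch are not visible in this snippet, so treat that reading as inferred from usage.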
Example 14: doNetworkInit
//......... the beginning of this example is omitted .........
        node.Attachment.Network = nc.ingressNetwork.Copy()
        if err := a.allocateNode(ctx, nc, node); err != nil {
            log.G(ctx).Errorf("Failed to allocate network resources for node %s during init: %v", node.ID, err)
        }
    }

    // Allocate services in the store so far before we process watched events.
    var services []*api.Service
    a.store.View(func(tx store.ReadTx) {
        services, err = store.FindServices(tx, store.All)
    })
    if err != nil {
        return fmt.Errorf("error listing all services in store while trying to allocate during init: %v", err)
    }

    for _, s := range services {
        if nc.nwkAllocator.IsServiceAllocated(s) {
            continue
        }
        if err := a.allocateService(ctx, nc, s); err != nil {
            log.G(ctx).Errorf("failed allocating service %s during init: %v", s.ID, err)
        }
    }

    // Allocate tasks in the store so far before we started watching.
    var tasks []*api.Task
    a.store.View(func(tx store.ReadTx) {
        tasks, err = store.FindTasks(tx, store.All)
    })
    if err != nil {
        return fmt.Errorf("error listing all tasks in store while trying to allocate during init: %v", err)
    }

    if _, err := a.store.Batch(func(batch *store.Batch) error {
        for _, t := range tasks {
            if taskDead(t) {
                continue
            }

            var s *api.Service
            if t.ServiceID != "" {
                a.store.View(func(tx store.ReadTx) {
                    s = store.GetService(tx, t.ServiceID)
                })
            }

            // Populate network attachments in the task
            // based on service spec.
            a.taskCreateNetworkAttachments(t, s)

            if taskReadyForNetworkVote(t, s, nc) {
                if t.Status.State >= api.TaskStateAllocated {
                    continue
                }

                if a.taskAllocateVote(networkVoter, t.ID) {
                    // If the task is not attached to any network, the network
                    // allocator's job is done. Immediately cast a vote so
                    // that the task can be moved to the ALLOCATED state as
                    // soon as possible.
                    if err := batch.Update(func(tx store.Tx) error {
                        storeT := store.GetTask(tx, t.ID)
                        if storeT == nil {
                            return fmt.Errorf("task %s not found while trying to update state", t.ID)
                        }

                        updateTaskStatus(storeT, api.TaskStateAllocated, "allocated")

                        if err := store.UpdateTask(tx, storeT); err != nil {
                            return fmt.Errorf("failed updating state in store transaction for task %s: %v", storeT.ID, err)
                        }

                        return nil
                    }); err != nil {
                        log.G(ctx).WithError(err).Error("error updating task network")
                    }
                }
                continue
            }

            err := batch.Update(func(tx store.Tx) error {
                _, err := a.allocateTask(ctx, nc, tx, t)
                return err
            })
            if err != nil {
                log.G(ctx).Errorf("failed allocating task %s during init: %v", t.ID, err)
                nc.unallocatedTasks[t.ID] = t
            }
        }

        return nil
    }); err != nil {
        return err
    }

    a.netCtx = nc
    return nil
}
Example 15: allocateTask
func (a *Allocator) allocateTask(ctx context.Context, nc *networkContext, tx store.Tx, t *api.Task) (*api.Task, error) {
    taskUpdated := false

    // Get the latest task state from the store before updating.
    storeT := store.GetTask(tx, t.ID)
    if storeT == nil {
        return nil, fmt.Errorf("could not find task %s while trying to update network allocation", t.ID)
    }

    // We might be here even if a task allocation has already
    // happened but wasn't successfully committed to store. In such
    // cases skip allocation and go straight ahead to updating the
    // store.
    if !nc.nwkAllocator.IsTaskAllocated(t) {
        if t.ServiceID != "" {
            s := store.GetService(tx, t.ServiceID)
            if s == nil {
                return nil, fmt.Errorf("could not find service %s", t.ServiceID)
            }

            if !nc.nwkAllocator.IsServiceAllocated(s) {
                return nil, fmt.Errorf("service %s to which task %s belongs has pending allocations", s.ID, t.ID)
            }

            taskUpdateEndpoint(t, s.Endpoint)
        }

        for _, na := range t.Networks {
            n := store.GetNetwork(tx, na.Network.ID)
            if n == nil {
                return nil, fmt.Errorf("failed to retrieve network %s while allocating task %s", na.Network.ID, t.ID)
            }

            if !nc.nwkAllocator.IsAllocated(n) {
                return nil, fmt.Errorf("network %s attached to task %s not allocated yet", n.ID, t.ID)
            }

            na.Network = n
        }

        if err := nc.nwkAllocator.AllocateTask(t); err != nil {
            return nil, fmt.Errorf("failed during network allocation for task %s: %v", t.ID, err)
        }

        if nc.nwkAllocator.IsTaskAllocated(t) {
            taskUpdateNetworks(storeT, t.Networks)
            taskUpdateEndpoint(storeT, t.Endpoint)
            taskUpdated = true
        }
    }

    // Update the network allocations and move to the
    // ALLOCATED state on top of the latest store state.
    if a.taskAllocateVote(networkVoter, t.ID) {
        if storeT.Status.State < api.TaskStateAllocated {
            updateTaskStatus(storeT, api.TaskStateAllocated, "allocated")
            taskUpdated = true
        }
    }

    if taskUpdated {
        if err := store.UpdateTask(tx, storeT); err != nil {
            return nil, fmt.Errorf("failed updating state in store transaction for task %s: %v", storeT.ID, err)
        }
    }

    return storeT, nil
}
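Read together, Examples 15 and 4 are two halves of the same discipline around store.UpdateTask: allocateTask always re-reads the task through the open transaction and writes the fresh copy, while commitAllocatedTask handles the case where a copy read earlier has gone stale and store.UpdateTask answers with store.ErrSequenceConflict. The safe default these snippets suggest is: GetTask inside the transaction, mutate, then UpdateTask.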