This page collects typical usage examples of the Golang FindTasks function from github.com/docker/swarmkit/manager/state/store. If you are wondering what exactly FindTasks does, how to call it, or what it looks like in real code, the curated examples below should help.
The following sections show 15 code examples of FindTasks, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better Golang code examples.
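All of these examples share one basic pattern: FindTasks is called inside a read transaction on the MemoryStore, together with a selector such as store.ByServiceID, store.ByNodeID, store.BySlot, or store.ByReferencedNetworkID, and the returned tasks are then processed (or modified in a separate Batch). As a quick orientation, here is a minimal sketch of that pattern; the helper name listServiceTasks is hypothetical and not part of swarmkit:

func listServiceTasks(s *store.MemoryStore, serviceID string) ([]*api.Task, error) {
    var (
        tasks []*api.Task
        err   error
    )
    // View opens a read-only transaction; FindTasks queries the task table
    // using a selector (here: all tasks belonging to one service).
    s.View(func(tx store.ReadTx) {
        tasks, err = store.FindTasks(tx, store.ByServiceID(serviceID))
    })
    if err != nil {
        return nil, err
    }
    return tasks, nil
}

The examples that follow differ mainly in which selector they pass and in what they do with the returned tasks.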
Example 1: removeTasksFromNode
func (g *GlobalOrchestrator) removeTasksFromNode(ctx context.Context, node *api.Node) {
    var (
        tasks []*api.Task
        err   error
    )
    g.store.View(func(tx store.ReadTx) {
        tasks, err = store.FindTasks(tx, store.ByNodeID(node.ID))
    })
    if err != nil {
        log.G(ctx).WithError(err).Errorf("global orchestrator: removeTasksFromNode failed finding tasks")
        return
    }
    _, err = g.store.Batch(func(batch *store.Batch) error {
        for _, t := range tasks {
            // GlobalOrchestrator only removes tasks from globalServices
            if _, exists := g.globalServices[t.ServiceID]; exists {
                g.removeTask(ctx, batch, t)
            }
        }
        return nil
    })
    if err != nil {
        log.G(ctx).WithError(err).Errorf("global orchestrator: removeTasksFromNode failed")
    }
}
Example 2: deleteServiceTasks
func deleteServiceTasks(ctx context.Context, s *store.MemoryStore, service *api.Service) {
    var (
        tasks []*api.Task
        err   error
    )
    s.View(func(tx store.ReadTx) {
        tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID))
    })
    if err != nil {
        log.G(ctx).WithError(err).Errorf("failed to list tasks")
        return
    }
    _, err = s.Batch(func(batch *store.Batch) error {
        for _, t := range tasks {
            err := batch.Update(func(tx store.Tx) error {
                if err := store.DeleteTask(tx, t.ID); err != nil {
                    log.G(ctx).WithError(err).Errorf("failed to delete task")
                }
                return nil
            })
            if err != nil {
                return err
            }
        }
        return nil
    })
    if err != nil {
        log.G(ctx).WithError(err).Errorf("task search transaction failed")
    }
}
Example 3: reconcileServiceOneNode
// reconcileServiceOneNode checks one service on one node
func (g *GlobalOrchestrator) reconcileServiceOneNode(ctx context.Context, serviceID string, nodeID string) {
    _, exists := g.nodes[nodeID]
    if !exists {
        return
    }
    service, exists := g.globalServices[serviceID]
    if !exists {
        return
    }
    // the node has completed this service
    completed := false
    // tasks for this node and service
    var (
        tasks []*api.Task
        err   error
    )
    g.store.View(func(tx store.ReadTx) {
        var tasksOnNode []*api.Task
        tasksOnNode, err = store.FindTasks(tx, store.ByNodeID(nodeID))
        if err != nil {
            return
        }
        for _, t := range tasksOnNode {
            // only interested in one service
            if t.ServiceID != serviceID {
                continue
            }
            if isTaskRunning(t) {
                tasks = append(tasks, t)
            } else {
                if isTaskCompleted(t, restartCondition(t)) {
                    completed = true
                }
            }
        }
    })
    if err != nil {
        log.G(ctx).WithError(err).Errorf("global orchestrator: reconcile failed finding tasks")
        return
    }
    _, err = g.store.Batch(func(batch *store.Batch) error {
        // if restart policy considers this node has finished its task
        // it should remove all running tasks
        if completed {
            g.removeTasks(ctx, batch, service, tasks)
            return nil
        }
        // this node needs to run 1 copy of the task
        if len(tasks) == 0 {
            g.addTask(ctx, batch, service, nodeID)
        } else {
            g.removeTasks(ctx, batch, service, tasks[1:])
        }
        return nil
    })
    if err != nil {
        log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileServiceOneNode batch failed")
    }
}
Example 4: getRunnableAndDeadSlots
// getRunnableAndDeadSlots returns two maps of slots. The first contains slots
// that have at least one task with a desired state above NEW and lesser or
// equal to RUNNING. The second is for slots that only contain tasks with a
// desired state above RUNNING.
func getRunnableAndDeadSlots(s *store.MemoryStore, serviceID string) (map[uint64]slot, map[uint64]slot, error) {
    var (
        tasks []*api.Task
        err   error
    )
    s.View(func(tx store.ReadTx) {
        tasks, err = store.FindTasks(tx, store.ByServiceID(serviceID))
    })
    if err != nil {
        return nil, nil, err
    }
    runningSlots := make(map[uint64]slot)
    for _, t := range tasks {
        if t.DesiredState <= api.TaskStateRunning {
            runningSlots[t.Slot] = append(runningSlots[t.Slot], t)
        }
    }
    deadSlots := make(map[uint64]slot)
    for _, t := range tasks {
        if _, exists := runningSlots[t.Slot]; !exists {
            deadSlots[t.Slot] = append(deadSlots[t.Slot], t)
        }
    }
    return runningSlots, deadSlots, nil
}
Example 5: reconcileOneService
func (g *GlobalOrchestrator) reconcileOneService(ctx context.Context, service *api.Service) {
    var (
        tasks []*api.Task
        err   error
    )
    g.store.View(func(tx store.ReadTx) {
        tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID))
    })
    if err != nil {
        log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileOneService failed finding tasks")
        return
    }
    // a node may have completed this service
    nodeCompleted := make(map[string]struct{})
    // nodeID -> task list
    nodeTasks := make(map[string][]*api.Task)
    for _, t := range tasks {
        if isTaskRunning(t) {
            // Collect all running instances of this service
            nodeTasks[t.NodeID] = append(nodeTasks[t.NodeID], t)
        } else {
            // for finished tasks, check restartPolicy
            if isTaskCompleted(t, restartCondition(t)) {
                nodeCompleted[t.NodeID] = struct{}{}
            }
        }
    }
    _, err = g.store.Batch(func(batch *store.Batch) error {
        var updateTasks []*api.Task
        for nodeID := range g.nodes {
            ntasks := nodeTasks[nodeID]
            // if restart policy considers this node has finished its task
            // it should remove all running tasks
            if _, exists := nodeCompleted[nodeID]; exists {
                g.removeTasks(ctx, batch, service, ntasks)
                return nil
            }
            // this node needs to run 1 copy of the task
            if len(ntasks) == 0 {
                g.addTask(ctx, batch, service, nodeID)
            } else {
                updateTasks = append(updateTasks, ntasks[0])
                g.removeTasks(ctx, batch, service, ntasks[1:])
            }
        }
        if len(updateTasks) > 0 {
            g.updater.Update(ctx, service, updateTasks)
        }
        return nil
    })
    if err != nil {
        log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileOneService transaction failed")
    }
}
Example 6: reconcile
func (r *ReplicatedOrchestrator) reconcile(ctx context.Context, service *api.Service) {
    var (
        tasks []*api.Task
        err   error
    )
    r.store.View(func(tx store.ReadTx) {
        tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID))
    })
    if err != nil {
        log.G(ctx).WithError(err).Errorf("reconcile failed finding tasks")
        return
    }
    runningTasks := make([]*api.Task, 0, len(tasks))
    runningInstances := make(map[uint64]struct{}) // this could be a bitfield...
    for _, t := range tasks {
        // Technically the check below could just be
        // t.DesiredState <= api.TaskStateRunning, but ignoring tasks
        // with DesiredState == NEW simplifies the drainer unit tests.
        if t.DesiredState > api.TaskStateNew && t.DesiredState <= api.TaskStateRunning {
            runningTasks = append(runningTasks, t)
            runningInstances[t.Slot] = struct{}{}
        }
    }
    numTasks := len(runningTasks)
    deploy := service.Spec.GetMode().(*api.ServiceSpec_Replicated)
    specifiedInstances := int(deploy.Replicated.Replicas)
    // TODO(aaronl): Add support for restart delays.
    _, err = r.store.Batch(func(batch *store.Batch) error {
        switch {
        case specifiedInstances > numTasks:
            log.G(ctx).Debugf("Service %s was scaled up from %d to %d instances", service.ID, numTasks, specifiedInstances)
            // Update all current tasks then add missing tasks
            r.updater.Update(ctx, service, runningTasks)
            r.addTasks(ctx, batch, service, runningInstances, specifiedInstances-numTasks)
        case specifiedInstances < numTasks:
            // Update up to N tasks then remove the extra
            log.G(ctx).Debugf("Service %s was scaled down from %d to %d instances", service.ID, numTasks, specifiedInstances)
            r.updater.Update(ctx, service, runningTasks[:specifiedInstances])
            r.removeTasks(ctx, batch, service, runningTasks[specifiedInstances:])
        case specifiedInstances == numTasks:
            // Simple update, no scaling - update all tasks.
            r.updater.Update(ctx, service, runningTasks)
        }
        return nil
    })
    if err != nil {
        log.G(ctx).WithError(err).Errorf("reconcile batch failed")
    }
}
Example 7: getRunnableServiceTasks
func getRunnableServiceTasks(t *testing.T, s *store.MemoryStore, service *api.Service) []*api.Task {
    var (
        err   error
        tasks []*api.Task
    )
    s.View(func(tx store.ReadTx) {
        tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID))
    })
    assert.NoError(t, err)
    runnable := []*api.Task{}
    for _, task := range tasks {
        if task.DesiredState == api.TaskStateRunning {
            runnable = append(runnable, task)
        }
    }
    return runnable
}
Example 8: RemoveNetwork
// RemoveNetwork removes a Network referenced by NetworkID.
// - Returns `InvalidArgument` if NetworkID is not provided.
// - Returns `NotFound` if the Network is not found.
// - Returns an error if the deletion fails.
func (s *Server) RemoveNetwork(ctx context.Context, request *api.RemoveNetworkRequest) (*api.RemoveNetworkResponse, error) {
    if request.NetworkID == "" {
        return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
    }
    err := s.store.Update(func(tx store.Tx) error {
        services, err := store.FindServices(tx, store.ByReferencedNetworkID(request.NetworkID))
        if err != nil {
            return grpc.Errorf(codes.Internal, "could not find services using network %s: %v", request.NetworkID, err)
        }
        if len(services) != 0 {
            return grpc.Errorf(codes.FailedPrecondition, "network %s is in use by service %s", request.NetworkID, services[0].ID)
        }
        tasks, err := store.FindTasks(tx, store.ByReferencedNetworkID(request.NetworkID))
        if err != nil {
            return grpc.Errorf(codes.Internal, "could not find tasks using network %s: %v", request.NetworkID, err)
        }
        if len(tasks) != 0 {
            return grpc.Errorf(codes.FailedPrecondition, "network %s is in use by task %s", request.NetworkID, tasks[0].ID)
        }
        nw := store.GetNetwork(tx, request.NetworkID)
        if _, ok := nw.Spec.Annotations.Labels["com.docker.swarm.internal"]; ok {
            networkDescription := nw.ID
            if nw.Spec.Annotations.Name != "" {
                networkDescription = fmt.Sprintf("%s (%s)", nw.Spec.Annotations.Name, nw.ID)
            }
            return grpc.Errorf(codes.PermissionDenied, "%s is a pre-defined network and cannot be removed", networkDescription)
        }
        return store.DeleteNetwork(tx, request.NetworkID)
    })
    if err != nil {
        if err == store.ErrNotExist {
            return nil, grpc.Errorf(codes.NotFound, "network %s not found", request.NetworkID)
        }
        return nil, err
    }
    return &api.RemoveNetworkResponse{}, nil
}
Example 9: match
func (s *subscription) match() {
    s.mu.Lock()
    defer s.mu.Unlock()
    add := func(t *api.Task) {
        if t.NodeID == "" {
            s.pendingTasks[t.ID] = struct{}{}
            return
        }
        if _, ok := s.nodes[t.NodeID]; !ok {
            s.nodes[t.NodeID] = struct{}{}
            s.wg.Add(1)
        }
    }
    s.store.View(func(tx store.ReadTx) {
        for _, nid := range s.message.Selector.NodeIDs {
            s.nodes[nid] = struct{}{}
        }
        for _, tid := range s.message.Selector.TaskIDs {
            if task := store.GetTask(tx, tid); task != nil {
                add(task)
            }
        }
        for _, sid := range s.message.Selector.ServiceIDs {
            tasks, err := store.FindTasks(tx, store.ByServiceID(sid))
            if err != nil {
                log.L.Warning(err)
                continue
            }
            for _, task := range tasks {
                add(task)
            }
        }
    })
}
Example 10: restartTasksByNodeID
func (r *Orchestrator) restartTasksByNodeID(ctx context.Context, nodeID string) {
    var err error
    r.store.View(func(tx store.ReadTx) {
        var tasks []*api.Task
        tasks, err = store.FindTasks(tx, store.ByNodeID(nodeID))
        if err != nil {
            return
        }
        for _, t := range tasks {
            if t.DesiredState > api.TaskStateRunning {
                continue
            }
            service := store.GetService(tx, t.ServiceID)
            if orchestrator.IsReplicatedService(service) {
                r.restartTasks[t.ID] = struct{}{}
            }
        }
    })
    if err != nil {
        log.G(ctx).WithError(err).Errorf("failed to list tasks to remove")
    }
}
Example 11: moveTasksToOrphaned
func (d *Dispatcher) moveTasksToOrphaned(nodeID string) error {
    _, err := d.store.Batch(func(batch *store.Batch) error {
        var (
            tasks []*api.Task
            err   error
        )
        d.store.View(func(tx store.ReadTx) {
            tasks, err = store.FindTasks(tx, store.ByNodeID(nodeID))
        })
        if err != nil {
            return err
        }
        for _, task := range tasks {
            if task.Status.State < api.TaskStateOrphaned {
                task.Status.State = api.TaskStateOrphaned
            }
            if err := batch.Update(func(tx store.Tx) error {
                err := store.UpdateTask(tx, task)
                if err != nil {
                    return err
                }
                return nil
            }); err != nil {
                return err
            }
        }
        return nil
    })
    return err
}
Example 12: getRunnableSlots
// getRunnableSlots returns a map of slots that have at least one task with
// a desired state above NEW and lesser or equal to RUNNING.
func getRunnableSlots(s *store.MemoryStore, serviceID string) (map[uint64]slot, error) {
    var (
        tasks []*api.Task
        err   error
    )
    s.View(func(tx store.ReadTx) {
        tasks, err = store.FindTasks(tx, store.ByServiceID(serviceID))
    })
    if err != nil {
        return nil, err
    }
    runningSlots := make(map[uint64]slot)
    for _, t := range tasks {
        // Technically the check below could just be
        // t.DesiredState <= api.TaskStateRunning, but ignoring tasks
        // with DesiredState == NEW simplifies the drainer unit tests.
        if t.DesiredState > api.TaskStateNew && t.DesiredState <= api.TaskStateRunning {
            runningSlots[t.Slot] = append(runningSlots[t.Slot], t)
        }
    }
    return runningSlots, nil
}
Example 13: Tasks
// Tasks is a stream of task states for a node. Each message contains the full
// list of tasks that should be run on the node; if a task is not present in
// that list, it should be terminated.
func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServer) error {
    nodeInfo, err := ca.RemoteNode(stream.Context())
    if err != nil {
        return err
    }
    nodeID := nodeInfo.NodeID
    if err := d.isRunningLocked(); err != nil {
        return err
    }
    fields := logrus.Fields{
        "node.id":      nodeID,
        "node.session": r.SessionID,
        "method":       "(*Dispatcher).Tasks",
    }
    if nodeInfo.ForwardedBy != nil {
        fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
    }
    log.G(stream.Context()).WithFields(fields).Debugf("")
    if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
        return err
    }
    tasksMap := make(map[string]*api.Task)
    nodeTasks, cancel, err := store.ViewAndWatch(
        d.store,
        func(readTx store.ReadTx) error {
            tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
            if err != nil {
                return err
            }
            for _, t := range tasks {
                tasksMap[t.ID] = t
            }
            return nil
        },
        state.EventCreateTask{Task: &api.Task{NodeID: nodeID},
            Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
        state.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
            Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
        state.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
            Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
    )
    if err != nil {
        return err
    }
    defer cancel()
    for {
        if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
            return err
        }
        var tasks []*api.Task
        for _, t := range tasksMap {
            // dispatcher only sends tasks that have been assigned to a node
            if t != nil && t.Status.State >= api.TaskStateAssigned {
                tasks = append(tasks, t)
            }
        }
        if err := stream.Send(&api.TasksMessage{Tasks: tasks}); err != nil {
            return err
        }
        select {
        case event := <-nodeTasks:
            switch v := event.(type) {
            case state.EventCreateTask:
                tasksMap[v.Task.ID] = v.Task
            case state.EventUpdateTask:
                tasksMap[v.Task.ID] = v.Task
            case state.EventDeleteTask:
                delete(tasksMap, v.Task.ID)
            }
        case <-stream.Context().Done():
            return stream.Context().Err()
        case <-d.ctx.Done():
            return d.ctx.Err()
        }
    }
}
Example 14: tick
func (tr *TaskReaper) tick() {
    if len(tr.dirty) == 0 {
        return
    }
    defer func() {
        tr.dirty = make(map[instanceTuple]struct{})
    }()
    var deleteTasks []string
    tr.store.View(func(tx store.ReadTx) {
        for dirty := range tr.dirty {
            service := store.GetService(tx, dirty.serviceID)
            if service == nil {
                continue
            }
            taskHistory := tr.taskHistory
            if taskHistory < 0 {
                continue
            }
            var historicTasks []*api.Task
            switch service.Spec.GetMode().(type) {
            case *api.ServiceSpec_Replicated:
                var err error
                historicTasks, err = store.FindTasks(tx, store.BySlot(dirty.serviceID, dirty.instance))
                if err != nil {
                    continue
                }
            case *api.ServiceSpec_Global:
                tasksByNode, err := store.FindTasks(tx, store.ByNodeID(dirty.nodeID))
                if err != nil {
                    continue
                }
                for _, t := range tasksByNode {
                    if t.ServiceID == dirty.serviceID {
                        historicTasks = append(historicTasks, t)
                    }
                }
            }
            if int64(len(historicTasks)) <= taskHistory {
                continue
            }
            // TODO(aaronl): This could filter for non-running tasks and use quickselect
            // instead of sorting the whole slice.
            sort.Sort(tasksByTimestamp(historicTasks))
            for _, t := range historicTasks {
                if t.DesiredState <= api.TaskStateRunning {
                    // Don't delete running tasks
                    continue
                }
                deleteTasks = append(deleteTasks, t.ID)
                taskHistory++
                if int64(len(historicTasks)) <= taskHistory {
                    break
                }
            }
        }
    })
    if len(deleteTasks) > 0 {
        tr.store.Batch(func(batch *store.Batch) error {
            for _, taskID := range deleteTasks {
                batch.Update(func(tx store.Tx) error {
                    return store.DeleteTask(tx, taskID)
                })
            }
            return nil
        })
    }
}
Example 15: reconcileServices
func (g *Orchestrator) reconcileServices(ctx context.Context, serviceIDs []string) {
    nodeCompleted := make(map[string]map[string]struct{})
    nodeTasks := make(map[string]map[string][]*api.Task)
    g.store.View(func(tx store.ReadTx) {
        for _, serviceID := range serviceIDs {
            tasks, err := store.FindTasks(tx, store.ByServiceID(serviceID))
            if err != nil {
                log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileServices failed finding tasks for service %s", serviceID)
                continue
            }
            // a node may have completed this service
            nodeCompleted[serviceID] = make(map[string]struct{})
            // nodeID -> task list
            nodeTasks[serviceID] = make(map[string][]*api.Task)
            for _, t := range tasks {
                if isTaskRunning(t) {
                    // Collect all running instances of this service
                    nodeTasks[serviceID][t.NodeID] = append(nodeTasks[serviceID][t.NodeID], t)
                } else {
                    // for finished tasks, check restartPolicy
                    if isTaskCompleted(t, orchestrator.RestartCondition(t)) {
                        nodeCompleted[serviceID][t.NodeID] = struct{}{}
                    }
                }
            }
        }
    })
    _, err := g.store.Batch(func(batch *store.Batch) error {
        var updateTasks []orchestrator.Slot
        for _, serviceID := range serviceIDs {
            if _, exists := nodeTasks[serviceID]; !exists {
                continue
            }
            service := g.globalServices[serviceID]
            for nodeID, node := range g.nodes {
                meetsConstraints := constraint.NodeMatches(service.constraints, node)
                ntasks := nodeTasks[serviceID][nodeID]
                delete(nodeTasks[serviceID], nodeID)
                // if restart policy considers this node has finished its task
                // it should remove all running tasks
                if _, exists := nodeCompleted[serviceID][nodeID]; exists || !meetsConstraints {
                    g.removeTasks(ctx, batch, ntasks)
                    continue
                }
                if node.Spec.Availability == api.NodeAvailabilityPause {
                    // the node is paused, so we won't add or update
                    // any tasks
                    continue
                }
                // this node needs to run 1 copy of the task
                if len(ntasks) == 0 {
                    g.addTask(ctx, batch, service.Service, nodeID)
                } else {
                    updateTasks = append(updateTasks, ntasks)
                }
            }
            if len(updateTasks) > 0 {
                g.updater.Update(ctx, g.cluster, service.Service, updateTasks)
            }
            // Remove any tasks assigned to nodes not found in g.nodes.
            // These must be associated with nodes that are drained, or
            // nodes that no longer exist.
            for _, ntasks := range nodeTasks[serviceID] {
                g.removeTasks(ctx, batch, ntasks)
            }
        }
        return nil
    })
    if err != nil {
        log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileServices transaction failed")
    }
}