This article collects typical usage examples of the ByServiceID function from the Golang package github.com/docker/swarmkit/manager/state/store. If you are wondering what ByServiceID does or how to use it, the curated examples below should help.
Nine code examples of the ByServiceID function are shown below, sorted by popularity by default.
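All of the examples share the same access pattern: open a read-only view on a MemoryStore, then pass store.ByServiceID to store.FindTasks so that only the tasks belonging to one service are returned. A minimal sketch of that pattern follows; the helper name tasksForService and the package name are illustrative, not part of swarmkit.

package example

import (
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/state/store"
)

// tasksForService is an illustrative helper: it lists every task that
// belongs to the given service by filtering with store.ByServiceID
// inside a read-only view of the store.
func tasksForService(s *store.MemoryStore, serviceID string) ([]*api.Task, error) {
	var (
		tasks []*api.Task
		err   error
	)
	s.View(func(tx store.ReadTx) {
		tasks, err = store.FindTasks(tx, store.ByServiceID(serviceID))
	})
	return tasks, err
}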
Example 1: getRunnableAndDeadSlots
// getRunnableAndDeadSlots returns two maps of slots. The first contains slots
// that have at least one task with a desired state above NEW and lesser or
// equal to RUNNING. The second is for slots that only contain tasks with a
// desired state above RUNNING.
func getRunnableAndDeadSlots(s *store.MemoryStore, serviceID string) (map[uint64]slot, map[uint64]slot, error) {
	var (
		tasks []*api.Task
		err   error
	)
	s.View(func(tx store.ReadTx) {
		tasks, err = store.FindTasks(tx, store.ByServiceID(serviceID))
	})
	if err != nil {
		return nil, nil, err
	}
	runningSlots := make(map[uint64]slot)
	for _, t := range tasks {
		if t.DesiredState <= api.TaskStateRunning {
			runningSlots[t.Slot] = append(runningSlots[t.Slot], t)
		}
	}
	deadSlots := make(map[uint64]slot)
	for _, t := range tasks {
		if _, exists := runningSlots[t.Slot]; !exists {
			deadSlots[t.Slot] = append(deadSlots[t.Slot], t)
		}
	}
	return runningSlots, deadSlots, nil
}
Example 2: deleteServiceTasks
func deleteServiceTasks(ctx context.Context, s *store.MemoryStore, service *api.Service) {
	var (
		tasks []*api.Task
		err   error
	)
	s.View(func(tx store.ReadTx) {
		tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID))
	})
	if err != nil {
		log.G(ctx).WithError(err).Errorf("failed to list tasks")
		return
	}
	_, err = s.Batch(func(batch *store.Batch) error {
		for _, t := range tasks {
			err := batch.Update(func(tx store.Tx) error {
				if err := store.DeleteTask(tx, t.ID); err != nil {
					log.G(ctx).WithError(err).Errorf("failed to delete task")
				}
				return nil
			})
			if err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		log.G(ctx).WithError(err).Errorf("task search transaction failed")
	}
}
Example 3: reconcileOneService
func (g *GlobalOrchestrator) reconcileOneService(ctx context.Context, service *api.Service) {
	var (
		tasks []*api.Task
		err   error
	)
	g.store.View(func(tx store.ReadTx) {
		tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID))
	})
	if err != nil {
		log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileOneService failed finding tasks")
		return
	}
	// a node may have completed this service
	nodeCompleted := make(map[string]struct{})
	// nodeID -> task list
	nodeTasks := make(map[string][]*api.Task)
	for _, t := range tasks {
		if isTaskRunning(t) {
			// Collect all running instances of this service
			nodeTasks[t.NodeID] = append(nodeTasks[t.NodeID], t)
		} else {
			// for finished tasks, check restartPolicy
			if isTaskCompleted(t, restartCondition(t)) {
				nodeCompleted[t.NodeID] = struct{}{}
			}
		}
	}
	_, err = g.store.Batch(func(batch *store.Batch) error {
		var updateTasks []*api.Task
		for nodeID := range g.nodes {
			ntasks := nodeTasks[nodeID]
			// if restart policy considers this node has finished its task
			// it should remove all running tasks
			if _, exists := nodeCompleted[nodeID]; exists {
				g.removeTasks(ctx, batch, service, ntasks)
				return nil
			}
			// this node needs to run 1 copy of the task
			if len(ntasks) == 0 {
				g.addTask(ctx, batch, service, nodeID)
			} else {
				updateTasks = append(updateTasks, ntasks[0])
				g.removeTasks(ctx, batch, service, ntasks[1:])
			}
		}
		if len(updateTasks) > 0 {
			g.updater.Update(ctx, service, updateTasks)
		}
		return nil
	})
	if err != nil {
		log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileOneService transaction failed")
	}
}
Example 4: reconcile
func (r *ReplicatedOrchestrator) reconcile(ctx context.Context, service *api.Service) {
	var (
		tasks []*api.Task
		err   error
	)
	r.store.View(func(tx store.ReadTx) {
		tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID))
	})
	if err != nil {
		log.G(ctx).WithError(err).Errorf("reconcile failed finding tasks")
		return
	}
	runningTasks := make([]*api.Task, 0, len(tasks))
	runningInstances := make(map[uint64]struct{}) // this could be a bitfield...
	for _, t := range tasks {
		// Technically the check below could just be
		// t.DesiredState <= api.TaskStateRunning, but ignoring tasks
		// with DesiredState == NEW simplifies the drainer unit tests.
		if t.DesiredState > api.TaskStateNew && t.DesiredState <= api.TaskStateRunning {
			runningTasks = append(runningTasks, t)
			runningInstances[t.Slot] = struct{}{}
		}
	}
	numTasks := len(runningTasks)
	deploy := service.Spec.GetMode().(*api.ServiceSpec_Replicated)
	specifiedInstances := int(deploy.Replicated.Replicas)
	// TODO(aaronl): Add support for restart delays.
	_, err = r.store.Batch(func(batch *store.Batch) error {
		switch {
		case specifiedInstances > numTasks:
			log.G(ctx).Debugf("Service %s was scaled up from %d to %d instances", service.ID, numTasks, specifiedInstances)
			// Update all current tasks then add missing tasks
			r.updater.Update(ctx, service, runningTasks)
			r.addTasks(ctx, batch, service, runningInstances, specifiedInstances-numTasks)
		case specifiedInstances < numTasks:
			// Update up to N tasks then remove the extra
			log.G(ctx).Debugf("Service %s was scaled down from %d to %d instances", service.ID, numTasks, specifiedInstances)
			r.updater.Update(ctx, service, runningTasks[:specifiedInstances])
			r.removeTasks(ctx, batch, service, runningTasks[specifiedInstances:])
		case specifiedInstances == numTasks:
			// Simple update, no scaling - update all tasks.
			r.updater.Update(ctx, service, runningTasks)
		}
		return nil
	})
	if err != nil {
		log.G(ctx).WithError(err).Errorf("reconcile batch failed")
	}
}
Example 5: getRunnableServiceTasks
func getRunnableServiceTasks(t *testing.T, s *store.MemoryStore, service *api.Service) []*api.Task {
	var (
		err   error
		tasks []*api.Task
	)
	s.View(func(tx store.ReadTx) {
		tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID))
	})
	assert.NoError(t, err)
	runnable := []*api.Task{}
	for _, task := range tasks {
		if task.DesiredState == api.TaskStateRunning {
			runnable = append(runnable, task)
		}
	}
	return runnable
}
Example 6: match
func (s *subscription) match() {
	s.mu.Lock()
	defer s.mu.Unlock()
	add := func(t *api.Task) {
		if t.NodeID == "" {
			s.pendingTasks[t.ID] = struct{}{}
			return
		}
		if _, ok := s.nodes[t.NodeID]; !ok {
			s.nodes[t.NodeID] = struct{}{}
			s.wg.Add(1)
		}
	}
	s.store.View(func(tx store.ReadTx) {
		for _, nid := range s.message.Selector.NodeIDs {
			s.nodes[nid] = struct{}{}
		}
		for _, tid := range s.message.Selector.TaskIDs {
			if task := store.GetTask(tx, tid); task != nil {
				add(task)
			}
		}
		for _, sid := range s.message.Selector.ServiceIDs {
			tasks, err := store.FindTasks(tx, store.ByServiceID(sid))
			if err != nil {
				log.L.Warning(err)
				continue
			}
			for _, task := range tasks {
				add(task)
			}
		}
	})
}
Example 7: getRunnableSlots
// getRunnableSlots returns a map of slots that have at least one task with
// a desired state above NEW and lesser or equal to RUNNING.
func getRunnableSlots(s *store.MemoryStore, serviceID string) (map[uint64]slot, error) {
	var (
		tasks []*api.Task
		err   error
	)
	s.View(func(tx store.ReadTx) {
		tasks, err = store.FindTasks(tx, store.ByServiceID(serviceID))
	})
	if err != nil {
		return nil, err
	}
	runningSlots := make(map[uint64]slot)
	for _, t := range tasks {
		// Technically the check below could just be
		// t.DesiredState <= api.TaskStateRunning, but ignoring tasks
		// with DesiredState == NEW simplifies the drainer unit tests.
		if t.DesiredState > api.TaskStateNew && t.DesiredState <= api.TaskStateRunning {
			runningSlots[t.Slot] = append(runningSlots[t.Slot], t)
		}
	}
	return runningSlots, nil
}
Example 8: reconcileServices
func (g *Orchestrator) reconcileServices(ctx context.Context, serviceIDs []string) {
	nodeCompleted := make(map[string]map[string]struct{})
	nodeTasks := make(map[string]map[string][]*api.Task)
	g.store.View(func(tx store.ReadTx) {
		for _, serviceID := range serviceIDs {
			tasks, err := store.FindTasks(tx, store.ByServiceID(serviceID))
			if err != nil {
				log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileServices failed finding tasks for service %s", serviceID)
				continue
			}
			// a node may have completed this service
			nodeCompleted[serviceID] = make(map[string]struct{})
			// nodeID -> task list
			nodeTasks[serviceID] = make(map[string][]*api.Task)
			for _, t := range tasks {
				if isTaskRunning(t) {
					// Collect all running instances of this service
					nodeTasks[serviceID][t.NodeID] = append(nodeTasks[serviceID][t.NodeID], t)
				} else {
					// for finished tasks, check restartPolicy
					if isTaskCompleted(t, orchestrator.RestartCondition(t)) {
						nodeCompleted[serviceID][t.NodeID] = struct{}{}
					}
				}
			}
		}
	})
	_, err := g.store.Batch(func(batch *store.Batch) error {
		var updateTasks []orchestrator.Slot
		for _, serviceID := range serviceIDs {
			if _, exists := nodeTasks[serviceID]; !exists {
				continue
			}
			service := g.globalServices[serviceID]
			for nodeID, node := range g.nodes {
				meetsConstraints := constraint.NodeMatches(service.constraints, node)
				ntasks := nodeTasks[serviceID][nodeID]
				delete(nodeTasks[serviceID], nodeID)
				// if restart policy considers this node has finished its task
				// it should remove all running tasks
				if _, exists := nodeCompleted[serviceID][nodeID]; exists || !meetsConstraints {
					g.removeTasks(ctx, batch, ntasks)
					continue
				}
				if node.Spec.Availability == api.NodeAvailabilityPause {
					// the node is paused, so we won't add or update
					// any tasks
					continue
				}
				// this node needs to run 1 copy of the task
				if len(ntasks) == 0 {
					g.addTask(ctx, batch, service.Service, nodeID)
				} else {
					updateTasks = append(updateTasks, ntasks)
				}
			}
			if len(updateTasks) > 0 {
				g.updater.Update(ctx, g.cluster, service.Service, updateTasks)
			}
			// Remove any tasks assigned to nodes not found in g.nodes.
			// These must be associated with nodes that are drained, or
			// nodes that no longer exist.
			for _, ntasks := range nodeTasks[serviceID] {
				g.removeTasks(ctx, batch, ntasks)
			}
		}
		return nil
	})
	if err != nil {
		log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileServices transaction failed")
	}
}
Example 9: reconcile
func (r *ReplicatedOrchestrator) reconcile(ctx context.Context, service *api.Service) {
	var (
		tasks []*api.Task
		err   error
	)
	r.store.View(func(tx store.ReadTx) {
		tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID))
	})
	if err != nil {
		log.G(ctx).WithError(err).Errorf("reconcile failed finding tasks")
		return
	}
	runningTasks := make([]*api.Task, 0, len(tasks))
	runningInstances := make(map[uint64]struct{}) // this could be a bitfield...
	for _, t := range tasks {
		// Technically the check below could just be
		// t.DesiredState <= api.TaskStateRunning, but ignoring tasks
		// with DesiredState == NEW simplifies the drainer unit tests.
		if t.DesiredState > api.TaskStateNew && t.DesiredState <= api.TaskStateRunning {
			runningTasks = append(runningTasks, t)
			runningInstances[t.Slot] = struct{}{}
		}
	}
	numTasks := len(runningTasks)
	deploy := service.Spec.GetMode().(*api.ServiceSpec_Replicated)
	specifiedInstances := int(deploy.Replicated.Replicas)
	switch {
	case specifiedInstances > numTasks:
		log.G(ctx).Debugf("Service %s was scaled up from %d to %d instances", service.ID, numTasks, specifiedInstances)
		// Update all current tasks then add missing tasks
		r.updater.Update(ctx, r.cluster, service, runningTasks)
		_, err = r.store.Batch(func(batch *store.Batch) error {
			r.addTasks(ctx, batch, service, runningInstances, specifiedInstances-numTasks)
			return nil
		})
		if err != nil {
			log.G(ctx).WithError(err).Errorf("reconcile batch failed")
		}
	case specifiedInstances < numTasks:
		// Update up to N tasks then remove the extra
		log.G(ctx).Debugf("Service %s was scaled down from %d to %d instances", service.ID, numTasks, specifiedInstances)
		// Preferentially remove tasks on the nodes that have the most
		// copies of this service, to leave a more balanced result.
		// First sort tasks such that tasks which are currently running
		// (in terms of observed state) appear before non-running tasks.
		// This will cause us to prefer to remove non-running tasks, all
		// other things being equal in terms of node balance.
		sort.Sort(tasksByRunningState(runningTasks))
		// Assign each task an index that counts it as the nth copy of
		// of the service on its node (1, 2, 3, ...), and sort the
		// tasks by this counter value.
		instancesByNode := make(map[string]int)
		tasksWithIndices := make(tasksByIndex, 0, numTasks)
		for _, t := range runningTasks {
			if t.NodeID != "" {
				instancesByNode[t.NodeID]++
				tasksWithIndices = append(tasksWithIndices, taskWithIndex{task: t, index: instancesByNode[t.NodeID]})
			} else {
				tasksWithIndices = append(tasksWithIndices, taskWithIndex{task: t, index: -1})
			}
		}
		sort.Sort(tasksWithIndices)
		sortedTasks := make([]*api.Task, 0, numTasks)
		for _, t := range tasksWithIndices {
			sortedTasks = append(sortedTasks, t.task)
		}
		r.updater.Update(ctx, r.cluster, service, sortedTasks[:specifiedInstances])
		_, err = r.store.Batch(func(batch *store.Batch) error {
			r.removeTasks(ctx, batch, service, sortedTasks[specifiedInstances:])
			return nil
		})
		if err != nil {
			log.G(ctx).WithError(err).Errorf("reconcile batch failed")
		}
	case specifiedInstances == numTasks:
		// Simple update, no scaling - update all tasks.
		r.updater.Update(ctx, r.cluster, service, runningTasks)
	}
}