This article collects typical usage examples of the Golang function store.CreateService from github.com/docker/swarmkit/manager/state/store. If you are unsure what the CreateService function does, how to call it, or what real-world usage looks like, the curated code samples below may help.
The text below shows 12 code examples of the CreateService function, sorted by popularity by default. Voting for the examples you find useful helps surface better Golang samples.
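Before the examples, here is a minimal standalone sketch of the pattern they all share: CreateService must run inside a write transaction opened by MemoryStore.Update. The read-back through View/GetService is an assumption based on the store package's accessor naming (only GetTask/GetNode appear in the examples below), so treat it as illustrative rather than authoritative.
package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/identity"
	"github.com/docker/swarmkit/manager/state/store"
)

func main() {
	// In-memory store, the same constructor the tests below use.
	s := store.NewMemoryStore(nil)
	defer s.Close()

	service := &api.Service{
		ID: identity.NewID(),
		Spec: api.ServiceSpec{
			Annotations: api.Annotations{Name: "example"},
		},
	}

	// All writes go through a transaction; CreateService is called on the Tx.
	if err := s.Update(func(tx store.Tx) error {
		return store.CreateService(tx, service)
	}); err != nil {
		panic(err)
	}

	// Read it back in a read-only transaction (assumed accessor).
	s.View(func(tx store.ReadTx) {
		fmt.Println(store.GetService(tx, service.ID).Spec.Annotations.Name)
	})
}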
Example 1: CreateService
// CreateService creates and returns a Service based on the provided ServiceSpec.
// - Returns `InvalidArgument` if the ServiceSpec is malformed.
// - Returns `Unimplemented` if the ServiceSpec references unimplemented features.
// - Returns `AlreadyExists` if the ServiceID conflicts.
// - Returns an error if the creation fails.
func (s *Server) CreateService(ctx context.Context, request *api.CreateServiceRequest) (*api.CreateServiceResponse, error) {
	if err := validateServiceSpec(request.Spec); err != nil {
		return nil, err
	}
	if err := s.checkPortConflicts(request.Spec, ""); err != nil {
		return nil, err
	}

	// TODO(aluzzardi): Consider using `Name` as a primary key to handle
	// duplicate creations. See #65
	service := &api.Service{
		ID:   identity.NewID(),
		Spec: *request.Spec,
	}

	err := s.store.Update(func(tx store.Tx) error {
		return store.CreateService(tx, service)
	})
	if err != nil {
		return nil, err
	}

	return &api.CreateServiceResponse{
		Service: service,
	}, nil
}
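The `AlreadyExists` case documented above surfaces because the store itself refuses a second object with the same ID. A small sketch of that failure path follows (same imports as the sketch near the top of the page; the error is checked generically rather than against a particular sentinel value, which is an assumption about how the duplicate is reported).
func createTwice() {
	s := store.NewMemoryStore(nil)
	defer s.Close()

	svc := &api.Service{
		ID:   "duplicate-id",
		Spec: api.ServiceSpec{Annotations: api.Annotations{Name: "web"}},
	}

	// First create succeeds.
	if err := s.Update(func(tx store.Tx) error {
		return store.CreateService(tx, svc)
	}); err != nil {
		panic(err)
	}

	// Creating the same ID again is expected to fail; the control API in
	// Example 1 would translate this kind of error into gRPC AlreadyExists.
	err := s.Update(func(tx store.Tx) error {
		return store.CreateService(tx, svc)
	})
	fmt.Println("second create:", err)
}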
Example 2: CreateService
// CreateService creates and returns a Service based on the provided ServiceSpec.
// - Returns `InvalidArgument` if the ServiceSpec is malformed.
// - Returns `Unimplemented` if the ServiceSpec references unimplemented features.
// - Returns `AlreadyExists` if the ServiceID conflicts.
// - Returns an error if the creation fails.
func (s *Server) CreateService(ctx context.Context, request *api.CreateServiceRequest) (*api.CreateServiceResponse, error) {
	if err := validateServiceSpec(request.Spec); err != nil {
		return nil, err
	}
	if err := s.validateNetworks(request.Spec.Networks); err != nil {
		return nil, err
	}
	if err := s.checkPortConflicts(request.Spec, ""); err != nil {
		return nil, err
	}

	// TODO(aluzzardi): Consider using `Name` as a primary key to handle
	// duplicate creations. See #65
	service := &api.Service{
		ID:   identity.NewID(),
		Spec: *request.Spec,
	}

	err := s.store.Update(func(tx store.Tx) error {
		// Check to see if all the secrets being added exist as objects
		// in our datastore
		err := s.checkSecretExistence(tx, request.Spec)
		if err != nil {
			return err
		}
		return store.CreateService(tx, service)
	})
	if err != nil {
		return nil, err
	}

	return &api.CreateServiceResponse{
		Service: service,
	}, nil
}
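Example 2 differs from Example 1 by validating networks up front and by checking, inside the same write transaction, that every referenced secret exists before the service is created. The helper below is a hypothetical illustration of that check-then-create pattern: the function name and the networkIDs parameter are invented for this sketch, and it assumes the store exposes a GetNetwork accessor analogous to the GetTask/GetNode accessors used elsewhere on this page.
// createServiceChecked is a hypothetical helper: it verifies that the
// referenced networks exist in the same transaction that creates the
// service, so the existence check and the create are atomic.
func createServiceChecked(s *store.MemoryStore, service *api.Service, networkIDs []string) error {
	return s.Update(func(tx store.Tx) error {
		for _, id := range networkIDs {
			if store.GetNetwork(tx, id) == nil {
				return fmt.Errorf("network %s does not exist", id)
			}
		}
		return store.CreateService(tx, service)
	})
}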
Example 3: TestUpdater
func TestUpdater(t *testing.T) {
	ctx := context.Background()
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)

	// Move tasks to their desired state.
	watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
	defer cancel()
	go func() {
		for {
			select {
			case e := <-watch:
				task := e.(state.EventUpdateTask).Task
				if task.Status.State == task.DesiredState {
					continue
				}
				err := s.Update(func(tx store.Tx) error {
					task = store.GetTask(tx, task.ID)
					task.Status.State = task.DesiredState
					return store.UpdateTask(tx, task)
				})
				assert.NoError(t, err)
			}
		}
	}()

	instances := 3
	cluster := &api.Cluster{
		// test cluster configuration propagation to task creation.
		Spec: api.ClusterSpec{
			Annotations: api.Annotations{
				Name: "default",
			},
		},
	}

	service := &api.Service{
		ID: "id1",
		Spec: api.ServiceSpec{
			Annotations: api.Annotations{
				Name: "name1",
			},
			Mode: &api.ServiceSpec_Replicated{
				Replicated: &api.ReplicatedService{
					Replicas: uint64(instances),
				},
			},
			Task: api.TaskSpec{
				Runtime: &api.TaskSpec_Container{
					Container: &api.ContainerSpec{
						Image: "v:1",
						// This won't apply in this test because we set the old tasks to DEAD.
						StopGracePeriod: ptypes.DurationProto(time.Hour),
					},
				},
			},
		},
	}

	err := s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.CreateCluster(tx, cluster))
		assert.NoError(t, store.CreateService(tx, service))
		for i := 0; i < instances; i++ {
			assert.NoError(t, store.CreateTask(tx, newTask(cluster, service, uint64(i))))
		}
		return nil
	})
	assert.NoError(t, err)

	originalTasks := getRunnableServiceTasks(t, s, service)
	for _, task := range originalTasks {
		assert.Equal(t, "v:1", task.Spec.GetContainer().Image)
		assert.Nil(t, task.LogDriver) // should be left alone
	}

	service.Spec.Task.GetContainer().Image = "v:2"
	service.Spec.Task.LogDriver = &api.Driver{Name: "tasklogdriver"}
	updater := NewUpdater(s, NewRestartSupervisor(s))
	updater.Run(ctx, cluster, service, getRunnableServiceTasks(t, s, service))
	updatedTasks := getRunnableServiceTasks(t, s, service)
	for _, task := range updatedTasks {
		assert.Equal(t, "v:2", task.Spec.GetContainer().Image)
		assert.Equal(t, service.Spec.Task.LogDriver, task.LogDriver) // pick up from task
	}

	service.Spec.Task.GetContainer().Image = "v:3"
	cluster.Spec.DefaultLogDriver = &api.Driver{Name: "clusterlogdriver"} // make cluster default logdriver.
	service.Spec.Update = &api.UpdateConfig{
		Parallelism: 1,
	}
	updater = NewUpdater(s, NewRestartSupervisor(s))
	updater.Run(ctx, cluster, service, getRunnableServiceTasks(t, s, service))
	updatedTasks = getRunnableServiceTasks(t, s, service)
	for _, task := range updatedTasks {
		assert.Equal(t, "v:3", task.Spec.GetContainer().Image)
		assert.Equal(t, service.Spec.Task.LogDriver, task.LogDriver) // still pick up from task
	}

	service.Spec.Task.GetContainer().Image = "v:4"
	service.Spec.Task.LogDriver = nil // use cluster default now.
	//......... remainder of this example omitted .........
Example 4: TestUpdaterStopGracePeriod
func TestUpdaterStopGracePeriod(t *testing.T) {
	ctx := context.Background()
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)

	// Move tasks to their desired state.
	watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
	defer cancel()
	go func() {
		for {
			select {
			case e := <-watch:
				task := e.(state.EventUpdateTask).Task
				err := s.Update(func(tx store.Tx) error {
					task = store.GetTask(tx, task.ID)
					// Explicitly do not set task state to
					// DEAD to trigger StopGracePeriod
					if task.DesiredState == api.TaskStateRunning && task.Status.State != api.TaskStateRunning {
						task.Status.State = api.TaskStateRunning
						return store.UpdateTask(tx, task)
					}
					return nil
				})
				assert.NoError(t, err)
			}
		}
	}()

	var instances uint64 = 3

	service := &api.Service{
		ID: "id1",
		Spec: api.ServiceSpec{
			Annotations: api.Annotations{
				Name: "name1",
			},
			Task: api.TaskSpec{
				Runtime: &api.TaskSpec_Container{
					Container: &api.ContainerSpec{
						Image:           "v:1",
						StopGracePeriod: ptypes.DurationProto(100 * time.Millisecond),
					},
				},
			},
			Mode: &api.ServiceSpec_Replicated{
				Replicated: &api.ReplicatedService{
					Replicas: instances,
				},
			},
		},
	}

	err := s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.CreateService(tx, service))
		for i := uint64(0); i < instances; i++ {
			task := newTask(nil, service, uint64(i))
			task.Status.State = api.TaskStateRunning
			assert.NoError(t, store.CreateTask(tx, task))
		}
		return nil
	})
	assert.NoError(t, err)

	originalTasks := getRunnableServiceTasks(t, s, service)
	for _, task := range originalTasks {
		assert.Equal(t, "v:1", task.Spec.GetContainer().Image)
	}

	before := time.Now()

	service.Spec.Task.GetContainer().Image = "v:2"
	updater := NewUpdater(s, NewRestartSupervisor(s))
	// Override the default (1 minute) to speed up the test.
	updater.restarts.taskTimeout = 100 * time.Millisecond
	updater.Run(ctx, nil, service, getRunnableServiceTasks(t, s, service))
	updatedTasks := getRunnableServiceTasks(t, s, service)
	for _, task := range updatedTasks {
		assert.Equal(t, "v:2", task.Spec.GetContainer().Image)
	}

	after := time.Now()

	// At least 100 ms should have elapsed. Only check the lower bound,
	// because the system may be slow and it could have taken longer.
	if after.Sub(before) < 100*time.Millisecond {
		t.Fatal("stop timeout should have elapsed")
	}
}
Example 5: TestTaskHistory
func TestTaskHistory(t *testing.T) {
	ctx := context.Background()

	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)

	assert.NoError(t, s.Update(func(tx store.Tx) error {
		store.CreateCluster(tx, &api.Cluster{
			ID: identity.NewID(),
			Spec: api.ClusterSpec{
				Annotations: api.Annotations{
					Name: store.DefaultClusterName,
				},
				Orchestration: api.OrchestrationConfig{
					TaskHistoryRetentionLimit: 2,
				},
			},
		})
		return nil
	}))

	taskReaper := NewTaskReaper(s)
	defer taskReaper.Stop()
	orchestrator := NewReplicatedOrchestrator(s)
	defer orchestrator.Stop()

	watch, cancel := state.Watch(s.WatchQueue() /*state.EventCreateTask{}, state.EventUpdateTask{}*/)
	defer cancel()

	// Create a service with two instances specified before the orchestrator is
	// started. This should result in two tasks when the orchestrator
	// starts up.
	err := s.Update(func(tx store.Tx) error {
		j1 := &api.Service{
			ID: "id1",
			Spec: api.ServiceSpec{
				Annotations: api.Annotations{
					Name: "name1",
				},
				Mode: &api.ServiceSpec_Replicated{
					Replicated: &api.ReplicatedService{
						Replicas: 2,
					},
				},
				Task: api.TaskSpec{
					Restart: &api.RestartPolicy{
						Condition: api.RestartOnAny,
						Delay:     ptypes.DurationProto(0),
					},
				},
			},
		}
		assert.NoError(t, store.CreateService(tx, j1))
		return nil
	})
	assert.NoError(t, err)

	// Start the orchestrator.
	go func() {
		assert.NoError(t, orchestrator.Run(ctx))
	}()
	go taskReaper.Run()

	observedTask1 := watchTaskCreate(t, watch)
	assert.Equal(t, observedTask1.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1")

	observedTask2 := watchTaskCreate(t, watch)
	assert.Equal(t, observedTask2.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1")

	// Fail both tasks. They should both get restarted.
	updatedTask1 := observedTask1.Copy()
	updatedTask1.Status.State = api.TaskStateFailed
	updatedTask1.ServiceAnnotations = api.Annotations{Name: "original"}
	updatedTask2 := observedTask2.Copy()
	updatedTask2.Status.State = api.TaskStateFailed
	updatedTask2.ServiceAnnotations = api.Annotations{Name: "original"}
	err = s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.UpdateTask(tx, updatedTask1))
		assert.NoError(t, store.UpdateTask(tx, updatedTask2))
		return nil
	})

	expectCommit(t, watch)
	expectTaskUpdate(t, watch)
	expectTaskUpdate(t, watch)
	expectCommit(t, watch)

	expectTaskUpdate(t, watch)
	observedTask3 := watchTaskCreate(t, watch)
	assert.Equal(t, observedTask3.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask3.ServiceAnnotations.Name, "name1")

	expectTaskUpdate(t, watch)
	observedTask4 := watchTaskCreate(t, watch)
	assert.Equal(t, observedTask4.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask4.ServiceAnnotations.Name, "name1")

	// Fail these replacement tasks. Since TaskHistory is set to 2, this
	// should cause the oldest tasks for each instance to get deleted.
	//......... remainder of this example omitted .........
Example 6: TestIsStateDirty
func TestIsStateDirty(t *testing.T) {
	ctx := context.Background()

	temp, err := ioutil.TempFile("", "test-socket")
	assert.NoError(t, err)
	assert.NoError(t, temp.Close())
	assert.NoError(t, os.Remove(temp.Name()))
	defer os.RemoveAll(temp.Name())

	stateDir, err := ioutil.TempDir("", "test-raft")
	assert.NoError(t, err)
	defer os.RemoveAll(stateDir)

	tc := testutils.NewTestCA(t, func(p ca.CertPaths) *ca.KeyReadWriter {
		return ca.NewKeyReadWriter(p, []byte("kek"), nil)
	})
	defer tc.Stop()

	managerSecurityConfig, err := tc.NewNodeConfig(ca.ManagerRole)
	assert.NoError(t, err)

	m, err := New(&Config{
		RemoteAPI:        &RemoteAddrs{ListenAddr: "127.0.0.1:0"},
		ControlAPI:       temp.Name(),
		StateDir:         stateDir,
		SecurityConfig:   managerSecurityConfig,
		AutoLockManagers: true,
		UnlockKey:        []byte("kek"),
	})
	assert.NoError(t, err)
	assert.NotNil(t, m)

	go m.Run(ctx)
	defer m.Stop(ctx, false)

	// State should never be dirty just after creating the manager
	isDirty, err := m.IsStateDirty()
	assert.NoError(t, err)
	assert.False(t, isDirty)

	// Wait for cluster and node to be created.
	watch, cancel := state.Watch(m.raftNode.MemoryStore().WatchQueue())
	defer cancel()
	<-watch
	<-watch

	// Updating the node should not cause the state to become dirty
	assert.NoError(t, m.raftNode.MemoryStore().Update(func(tx store.Tx) error {
		node := store.GetNode(tx, m.config.SecurityConfig.ClientTLSCreds.NodeID())
		require.NotNil(t, node)
		node.Spec.Availability = api.NodeAvailabilityPause
		return store.UpdateNode(tx, node)
	}))
	isDirty, err = m.IsStateDirty()
	assert.NoError(t, err)
	assert.False(t, isDirty)

	// Adding a service should cause the state to become dirty
	assert.NoError(t, m.raftNode.MemoryStore().Update(func(tx store.Tx) error {
		return store.CreateService(tx, &api.Service{ID: "foo"})
	}))
	isDirty, err = m.IsStateDirty()
	assert.NoError(t, err)
	assert.True(t, isDirty)
}
Example 7: TestLogBrokerSelector
func TestLogBrokerSelector(t *testing.T) {
	ctx, ca, _, serverAddr, brokerAddr, done := testLogBrokerEnv(t)
	defer done()

	client, clientDone := testLogClient(t, serverAddr)
	defer clientDone()

	agent1, agent1Security, agent1Done := testBrokerClient(t, ca, brokerAddr)
	defer agent1Done()
	agent1subscriptions := listenSubscriptions(ctx, t, agent1)

	agent2, agent2Security, agent2Done := testBrokerClient(t, ca, brokerAddr)
	defer agent2Done()
	agent2subscriptions := listenSubscriptions(ctx, t, agent2)

	// Subscribe to a task.
	require.NoError(t, ca.MemoryStore.Update(func(tx store.Tx) error {
		return store.CreateTask(tx, &api.Task{
			ID: "task",
		})
	}))

	_, err := client.SubscribeLogs(ctx, &api.SubscribeLogsRequest{
		Options: &api.LogSubscriptionOptions{
			Follow: true,
		},
		Selector: &api.LogSelector{
			TaskIDs: []string{"task"},
		},
	})
	require.NoError(t, err)

	// Since it's not assigned to any agent, nobody should receive it.
	ensureNoSubscription(t, agent1subscriptions)
	ensureNoSubscription(t, agent2subscriptions)

	// Assign the task to agent-1. Make sure it's received by agent-1 but *not*
	// agent-2.
	require.NoError(t, ca.MemoryStore.Update(func(tx store.Tx) error {
		task := store.GetTask(tx, "task")
		require.NotNil(t, task)
		task.NodeID = agent1Security.ServerTLSCreds.NodeID()
		return store.UpdateTask(tx, task)
	}))
	ensureSubscription(t, agent1subscriptions)
	ensureNoSubscription(t, agent2subscriptions)

	// Subscribe to a service.
	require.NoError(t, ca.MemoryStore.Update(func(tx store.Tx) error {
		return store.CreateService(tx, &api.Service{
			ID: "service",
		})
	}))

	_, err = client.SubscribeLogs(ctx, &api.SubscribeLogsRequest{
		Options: &api.LogSubscriptionOptions{
			Follow: true,
		},
		Selector: &api.LogSelector{
			ServiceIDs: []string{"service"},
		},
	})
	require.NoError(t, err)

	// Since there are no corresponding tasks, nobody should receive it.
	ensureNoSubscription(t, agent1subscriptions)
	ensureNoSubscription(t, agent2subscriptions)

	// Create a task that does *NOT* belong to our service and assign it to node-1.
	require.NoError(t, ca.MemoryStore.Update(func(tx store.Tx) error {
		return store.CreateTask(tx, &api.Task{
			ID:        "wrong-task",
			ServiceID: "wrong-service",
			NodeID:    agent1Security.ServerTLSCreds.NodeID(),
		})
	}))
	// Ensure agent-1 doesn't receive it.
	ensureNoSubscription(t, agent1subscriptions)

	// Now create another task that does belong to our service and assign it to node-1.
	require.NoError(t, ca.MemoryStore.Update(func(tx store.Tx) error {
		return store.CreateTask(tx, &api.Task{
			ID:        "service-task-1",
			ServiceID: "service",
			NodeID:    agent1Security.ServerTLSCreds.NodeID(),
		})
	}))
	// Make sure agent-1 receives it...
	ensureSubscription(t, agent1subscriptions)
	// ...and agent-2 does not.
	ensureNoSubscription(t, agent2subscriptions)

	// Create another task, same as above.
	require.NoError(t, ca.MemoryStore.Update(func(tx store.Tx) error {
		return store.CreateTask(tx, &api.Task{
			ID:        "service-task-2",
			ServiceID: "service",
			NodeID:    agent1Security.ServerTLSCreds.NodeID(),
	//......... remainder of this example omitted .........
Example 8: TestOrchestratorRestartOnNone
func TestOrchestratorRestartOnNone(t *testing.T) {
	ctx := context.Background()
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)

	orchestrator := NewReplicatedOrchestrator(s)
	defer orchestrator.Stop()

	watch, cancel := state.Watch(s.WatchQueue() /*state.EventCreateTask{}, state.EventUpdateTask{}*/)
	defer cancel()

	// Create a service with two instances specified before the orchestrator is
	// started. This should result in two tasks when the orchestrator
	// starts up.
	err := s.Update(func(tx store.Tx) error {
		j1 := &api.Service{
			ID: "id1",
			Spec: api.ServiceSpec{
				Annotations: api.Annotations{
					Name: "name1",
				},
				Task: api.TaskSpec{
					Runtime: &api.TaskSpec_Container{
						Container: &api.ContainerSpec{},
					},
					Restart: &api.RestartPolicy{
						Condition: api.RestartOnNone,
					},
				},
				Mode: &api.ServiceSpec_Replicated{
					Replicated: &api.ReplicatedService{
						Replicas: 2,
					},
				},
			},
		}
		assert.NoError(t, store.CreateService(tx, j1))
		return nil
	})
	assert.NoError(t, err)

	// Start the orchestrator.
	go func() {
		assert.NoError(t, orchestrator.Run(ctx))
	}()

	observedTask1 := watchTaskCreate(t, watch)
	assert.Equal(t, observedTask1.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1")

	observedTask2 := watchTaskCreate(t, watch)
	assert.Equal(t, observedTask2.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1")

	// Fail the first task. Confirm that it does not get restarted.
	updatedTask1 := observedTask1.Copy()
	updatedTask1.Status.State = api.TaskStateFailed
	err = s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.UpdateTask(tx, updatedTask1))
		return nil
	})
	assert.NoError(t, err)

	expectCommit(t, watch)
	expectTaskUpdate(t, watch)
	expectCommit(t, watch)
	expectTaskUpdate(t, watch)
	expectCommit(t, watch)

	select {
	case <-watch:
		t.Fatal("got unexpected event")
	case <-time.After(100 * time.Millisecond):
	}

	// Mark the second task as completed. Confirm that it does not get restarted.
	updatedTask2 := observedTask2.Copy()
	updatedTask2.Status = api.TaskStatus{State: api.TaskStateCompleted}
	err = s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.UpdateTask(tx, updatedTask2))
		return nil
	})
	assert.NoError(t, err)

	expectTaskUpdate(t, watch)
	expectCommit(t, watch)
	expectTaskUpdate(t, watch)
	expectCommit(t, watch)

	select {
	case <-watch:
		t.Fatal("got unexpected event")
	case <-time.After(100 * time.Millisecond):
	}
}
Example 9: TestOrchestratorRestartDelay
func TestOrchestratorRestartDelay(t *testing.T) {
	ctx := context.Background()
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)

	orchestrator := NewReplicatedOrchestrator(s)
	defer orchestrator.Stop()

	watch, cancel := state.Watch(s.WatchQueue() /*state.EventCreateTask{}, state.EventUpdateTask{}*/)
	defer cancel()

	// Create a service with two instances specified before the orchestrator is
	// started. This should result in two tasks when the orchestrator
	// starts up.
	err := s.Update(func(tx store.Tx) error {
		j1 := &api.Service{
			ID: "id1",
			Spec: api.ServiceSpec{
				Annotations: api.Annotations{
					Name: "name1",
				},
				Task: api.TaskSpec{
					Runtime: &api.TaskSpec_Container{
						Container: &api.ContainerSpec{},
					},
					Restart: &api.RestartPolicy{
						Condition: api.RestartOnAny,
						Delay:     ptypes.DurationProto(100 * time.Millisecond),
					},
				},
				Mode: &api.ServiceSpec_Replicated{
					Replicated: &api.ReplicatedService{
						Replicas: 2,
					},
				},
			},
		}
		assert.NoError(t, store.CreateService(tx, j1))
		return nil
	})
	assert.NoError(t, err)

	// Start the orchestrator.
	go func() {
		assert.NoError(t, orchestrator.Run(ctx))
	}()

	observedTask1 := watchTaskCreate(t, watch)
	assert.Equal(t, observedTask1.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1")

	observedTask2 := watchTaskCreate(t, watch)
	assert.Equal(t, observedTask2.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1")

	// Fail the first task. Confirm that it gets restarted.
	updatedTask1 := observedTask1.Copy()
	updatedTask1.Status = api.TaskStatus{State: api.TaskStateFailed}
	before := time.Now()
	err = s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.UpdateTask(tx, updatedTask1))
		return nil
	})
	assert.NoError(t, err)

	expectCommit(t, watch)
	expectTaskUpdate(t, watch)
	expectCommit(t, watch)
	expectTaskUpdate(t, watch)
	observedTask3 := watchTaskCreate(t, watch)
	expectCommit(t, watch)
	assert.Equal(t, observedTask3.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask3.DesiredState, api.TaskStateReady)
	assert.Equal(t, observedTask3.ServiceAnnotations.Name, "name1")

	observedTask4 := watchTaskUpdate(t, watch)
	after := time.Now()

	// At least 100 ms should have elapsed. Only check the lower bound,
	// because the system may be slow and it could have taken longer.
	if after.Sub(before) < 100*time.Millisecond {
		t.Fatalf("restart delay should have elapsed. Got: %v", after.Sub(before))
	}

	assert.Equal(t, observedTask4.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask4.DesiredState, api.TaskStateRunning)
	assert.Equal(t, observedTask4.ServiceAnnotations.Name, "name1")
}
Example 10: TestUpdaterRollback
func TestUpdaterRollback(t *testing.T) {
	ctx := context.Background()
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	defer s.Close()

	orchestrator := NewReplicatedOrchestrator(s)
	defer orchestrator.Stop()

	var (
		failImage1 uint32
		failImage2 uint32
	)

	watchCreate, cancelCreate := state.Watch(s.WatchQueue(), state.EventCreateTask{})
	defer cancelCreate()

	watchServiceUpdate, cancelServiceUpdate := state.Watch(s.WatchQueue(), state.EventUpdateService{})
	defer cancelServiceUpdate()

	// Fail new tasks the updater tries to run
	watchUpdate, cancelUpdate := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
	defer cancelUpdate()
	go func() {
		failedLast := false
		for {
			select {
			case e := <-watchUpdate:
				task := e.(state.EventUpdateTask).Task
				if task.DesiredState == task.Status.State {
					continue
				}
				if task.DesiredState == api.TaskStateRunning && task.Status.State != api.TaskStateFailed && task.Status.State != api.TaskStateRunning {
					err := s.Update(func(tx store.Tx) error {
						task = store.GetTask(tx, task.ID)
						// Never fail two image2 tasks in a row, so there's a mix of
						// failed and successful tasks for the rollback.
						if task.Spec.GetContainer().Image == "image1" && atomic.LoadUint32(&failImage1) == 1 {
							task.Status.State = api.TaskStateFailed
							failedLast = true
						} else if task.Spec.GetContainer().Image == "image2" && atomic.LoadUint32(&failImage2) == 1 && !failedLast {
							task.Status.State = api.TaskStateFailed
							failedLast = true
						} else {
							task.Status.State = task.DesiredState
							failedLast = false
						}
						return store.UpdateTask(tx, task)
					})
					assert.NoError(t, err)
				} else if task.DesiredState > api.TaskStateRunning {
					err := s.Update(func(tx store.Tx) error {
						task = store.GetTask(tx, task.ID)
						task.Status.State = task.DesiredState
						return store.UpdateTask(tx, task)
					})
					assert.NoError(t, err)
				}
			}
		}
	}()

	// Create a service with four replicas specified before the orchestrator
	// is started. This should result in four tasks when the orchestrator
	// starts up.
	err := s.Update(func(tx store.Tx) error {
		s1 := &api.Service{
			ID: "id1",
			Spec: api.ServiceSpec{
				Annotations: api.Annotations{
					Name: "name1",
				},
				Task: api.TaskSpec{
					Runtime: &api.TaskSpec_Container{
						Container: &api.ContainerSpec{
							Image: "image1",
						},
					},
					Restart: &api.RestartPolicy{
						Condition: api.RestartOnNone,
					},
				},
				Mode: &api.ServiceSpec_Replicated{
					Replicated: &api.ReplicatedService{
						Replicas: 4,
					},
				},
				Update: &api.UpdateConfig{
					FailureAction:   api.UpdateConfig_ROLLBACK,
					Parallelism:     1,
					Delay:           *ptypes.DurationProto(10 * time.Millisecond),
					Monitor:         ptypes.DurationProto(500 * time.Millisecond),
					MaxFailureRatio: 0.4,
				},
			},
		}
		assert.NoError(t, store.CreateService(tx, s1))
		return nil
	})
	//......... remainder of this example omitted .........
Example 11: TestOrchestratorRestartWindow
func TestOrchestratorRestartWindow(t *testing.T) {
	ctx := context.Background()
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)

	orchestrator := NewReplicatedOrchestrator(s)
	defer orchestrator.Stop()

	watch, cancel := state.Watch(s.WatchQueue() /*state.EventCreateTask{}, state.EventUpdateTask{}*/)
	defer cancel()

	// Create a service with two instances specified before the orchestrator is
	// started. This should result in two tasks when the orchestrator
	// starts up.
	err := s.Update(func(tx store.Tx) error {
		j1 := &api.Service{
			ID: "id1",
			Spec: api.ServiceSpec{
				Annotations: api.Annotations{
					Name: "name1",
				},
				Mode: &api.ServiceSpec_Replicated{
					Replicated: &api.ReplicatedService{
						Replicas: 2,
					},
				},
				Task: api.TaskSpec{
					Restart: &api.RestartPolicy{
						Condition:   api.RestartOnAny,
						Delay:       ptypes.DurationProto(100 * time.Millisecond),
						MaxAttempts: 1,
						Window:      ptypes.DurationProto(500 * time.Millisecond),
					},
				},
			},
		}
		assert.NoError(t, store.CreateService(tx, j1))
		return nil
	})
	assert.NoError(t, err)

	// Start the orchestrator.
	go func() {
		assert.NoError(t, orchestrator.Run(ctx))
	}()

	observedTask1 := watchTaskCreate(t, watch)
	assert.Equal(t, observedTask1.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1")

	observedTask2 := watchTaskCreate(t, watch)
	assert.Equal(t, observedTask2.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1")

	// Fail the first task. Confirm that it gets restarted.
	updatedTask1 := observedTask1.Copy()
	updatedTask1.Status = api.TaskStatus{State: api.TaskStateFailed}
	before := time.Now()
	err = s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.UpdateTask(tx, updatedTask1))
		return nil
	})
	assert.NoError(t, err)

	expectCommit(t, watch)
	expectTaskUpdate(t, watch)
	expectCommit(t, watch)
	expectTaskUpdate(t, watch)
	observedTask3 := watchTaskCreate(t, watch)
	expectCommit(t, watch)
	assert.Equal(t, observedTask3.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask3.DesiredState, api.TaskStateReady)
	assert.Equal(t, observedTask3.ServiceAnnotations.Name, "name1")

	observedTask4 := watchTaskUpdate(t, watch)
	after := time.Now()

	// At least 100 ms should have elapsed. Only check the lower bound,
	// because the system may be slow and it could have taken longer.
	if after.Sub(before) < 100*time.Millisecond {
		t.Fatal("restart delay should have elapsed")
	}

	assert.Equal(t, observedTask4.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask4.DesiredState, api.TaskStateRunning)
	assert.Equal(t, observedTask4.ServiceAnnotations.Name, "name1")

	// Fail the second task. Confirm that it gets restarted.
	updatedTask2 := observedTask2.Copy()
	updatedTask2.Status = api.TaskStatus{State: api.TaskStateFailed}
	before = time.Now()
	err = s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.UpdateTask(tx, updatedTask2))
		return nil
	})
	assert.NoError(t, err)

	expectCommit(t, watch)
	expectTaskUpdate(t, watch)
	expectCommit(t, watch)
	expectTaskUpdate(t, watch)
	//......... remainder of this example omitted .........
Example 12: TestUpdater
func TestUpdater(t *testing.T) {
	ctx := context.Background()
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	defer s.Close()

	// Move tasks to their desired state.
	watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
	defer cancel()
	go func() {
		for {
			select {
			case e := <-watch:
				task := e.(state.EventUpdateTask).Task
				if task.Status.State == task.DesiredState {
					continue
				}
				err := s.Update(func(tx store.Tx) error {
					task = store.GetTask(tx, task.ID)
					task.Status.State = task.DesiredState
					return store.UpdateTask(tx, task)
				})
				assert.NoError(t, err)
			}
		}
	}()

	instances := 3
	cluster := &api.Cluster{
		// test cluster configuration propagation to task creation.
		Spec: api.ClusterSpec{
			Annotations: api.Annotations{
				Name: "default",
			},
		},
	}

	service := &api.Service{
		ID: "id1",
		Spec: api.ServiceSpec{
			Annotations: api.Annotations{
				Name: "name1",
			},
			Mode: &api.ServiceSpec_Replicated{
				Replicated: &api.ReplicatedService{
					Replicas: uint64(instances),
				},
			},
			Task: api.TaskSpec{
				Runtime: &api.TaskSpec_Container{
					Container: &api.ContainerSpec{
						Image: "v:1",
					},
				},
			},
			Update: &api.UpdateConfig{
				// avoid having Run block for a long time to watch for failures
				Monitor: gogotypes.DurationProto(50 * time.Millisecond),
			},
		},
	}

	err := s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.CreateCluster(tx, cluster))
		assert.NoError(t, store.CreateService(tx, service))
		for i := 0; i < instances; i++ {
			assert.NoError(t, store.CreateTask(tx, orchestrator.NewTask(cluster, service, uint64(i), "")))
		}
		return nil
	})
	assert.NoError(t, err)

	originalTasks := getRunnableSlotSlice(t, s, service)
	for _, slot := range originalTasks {
		for _, task := range slot {
			assert.Equal(t, "v:1", task.Spec.GetContainer().Image)
			assert.Nil(t, task.LogDriver) // should be left alone
		}
	}

	service.Spec.Task.GetContainer().Image = "v:2"
	service.Spec.Task.LogDriver = &api.Driver{Name: "tasklogdriver"}
	updater := NewUpdater(s, restart.NewSupervisor(s), cluster, service)
	updater.Run(ctx, getRunnableSlotSlice(t, s, service))
	updatedTasks := getRunnableSlotSlice(t, s, service)
	for _, slot := range updatedTasks {
		for _, task := range slot {
			assert.Equal(t, "v:2", task.Spec.GetContainer().Image)
			assert.Equal(t, service.Spec.Task.LogDriver, task.LogDriver) // pick up from task
		}
	}

	service.Spec.Task.GetContainer().Image = "v:3"
	cluster.Spec.TaskDefaults.LogDriver = &api.Driver{Name: "clusterlogdriver"} // make cluster default logdriver.
	service.Spec.Update = &api.UpdateConfig{
		Parallelism: 1,
		Monitor:     gogotypes.DurationProto(50 * time.Millisecond),
	}
	updater = NewUpdater(s, restart.NewSupervisor(s), cluster, service)
	updater.Run(ctx, getRunnableSlotSlice(t, s, service))
	//......... remainder of this example omitted .........