This page collects typical usage examples of the Go CreateCluster function from github.com/docker/swarmkit/manager/state/store. If you are wondering what CreateCluster does, how to call it, or what real-world usage looks like, the curated examples below should help.
A total of 12 code examples of CreateCluster are shown, ordered by popularity by default.
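Before the project-sourced examples, here is a minimal, self-contained sketch of the pattern they all share: open a store and call store.CreateCluster inside a write transaction via Update. This is only an illustration; the import paths for the api and identity packages are assumed from the swarmkit source tree, and the cluster carries just the minimal fields seen in the examples below.

package main

import (
    "log"

    "github.com/docker/swarmkit/api"
    "github.com/docker/swarmkit/identity"
    "github.com/docker/swarmkit/manager/state/store"
)

func main() {
    // An in-memory store, as used by the tests below.
    s := store.NewMemoryStore(nil)
    defer s.Close()

    cluster := &api.Cluster{
        ID: identity.NewID(),
        Spec: api.ClusterSpec{
            Annotations: api.Annotations{
                Name: store.DefaultClusterName,
            },
        },
    }

    // CreateCluster must run inside a write transaction; Update wraps one.
    err := s.Update(func(tx store.Tx) error {
        return store.CreateCluster(tx, cluster)
    })
    if err != nil {
        log.Fatalf("creating cluster: %v", err)
    }
}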
Example 1: createCluster
func createCluster(t *testing.T, ts *testServer, id, name string, policy api.AcceptancePolicy, rootCA *ca.RootCA) *api.Cluster {
    cluster := createClusterObj(id, name, policy, rootCA)
    assert.NoError(t, ts.Store.Update(func(tx store.Tx) error {
        return store.CreateCluster(tx, cluster)
    }))
    return cluster
}
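The createClusterObj helper used here (and again in Example 11) is not shown on this page. A hypothetical reconstruction, inferred from the inline construction in Example 5 and therefore only an assumption rather than the actual test helper, might look like this:

// Hypothetical sketch of createClusterObj; the body is inferred from
// Example 5's inline version and may differ from the real helper.
func createClusterObj(id, name string, policy api.AcceptancePolicy, rootCA *ca.RootCA) *api.Cluster {
    spec := createClusterSpec(name) // helper assumed to build a named ClusterSpec
    spec.AcceptancePolicy = policy
    return &api.Cluster{
        ID:   id,
        Spec: *spec,
        RootCA: api.RootCA{
            CACert:     rootCA.Cert,
            CAKey:      rootCA.Key,
            CACertHash: rootCA.Digest.String(),
        },
    }
}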
Example 2: createCluster
func createCluster(t *testing.T, s *store.MemoryStore, id, name string) *api.Cluster {
    spec := createClusterSpec(name)
    cluster := &api.Cluster{
        ID:   id,
        Spec: *spec,
    }
    assert.NoError(t, s.Update(func(tx store.Tx) error {
        return store.CreateCluster(tx, cluster)
    }))
    return cluster
}
Example 3: createClusterObject
func createClusterObject(t *testing.T, s *store.MemoryStore, acceptancePolicy api.AcceptancePolicy) {
    assert.NoError(t, s.Update(func(tx store.Tx) error {
        store.CreateCluster(tx, &api.Cluster{
            ID: identity.NewID(),
            Spec: api.ClusterSpec{
                Annotations: api.Annotations{
                    Name: store.DefaultClusterName,
                },
                AcceptancePolicy: acceptancePolicy,
            },
        })
        return nil
    }))
}
Example 4: createClusterObject
func createClusterObject(t *testing.T, s *store.MemoryStore, clusterID string, acceptancePolicy api.AcceptancePolicy, externalCAs ...*api.ExternalCA) {
    assert.NoError(t, s.Update(func(tx store.Tx) error {
        store.CreateCluster(tx, &api.Cluster{
            ID: clusterID,
            Spec: api.ClusterSpec{
                Annotations: api.Annotations{
                    Name: store.DefaultClusterName,
                },
                AcceptancePolicy: acceptancePolicy,
                CAConfig: api.CAConfig{
                    ExternalCAs: externalCAs,
                },
            },
        })
        return nil
    }))
}
Example 5: createCluster
func createCluster(t *testing.T, ts *testServer, id, name string, policy api.AcceptancePolicy) *api.Cluster {
    spec := createClusterSpec(name)
    spec.AcceptancePolicy = policy
    cluster := &api.Cluster{
        ID:   id,
        Spec: *spec,
        RootCA: api.RootCA{
            CACert:     []byte("-----BEGIN CERTIFICATE-----AwEHoUQDQgAEZ4vGYkSt/kjoHbUjDx9eyO1xBVJEH2F+AwM9lACIZ414cD1qYy8u-----END CERTIFICATE-----"),
            CAKey:      []byte("-----BEGIN EC PRIVATE KEY-----AwEHoUQDQgAEZ4vGYkSt/kjoHbUjDx9eyO1xBVJEH2F+AwM9lACIZ414cD1qYy8u-----END EC PRIVATE KEY-----"),
            CACertHash: "hash",
        },
    }
    assert.NoError(t, ts.Store.Update(func(tx store.Tx) error {
        return store.CreateCluster(tx, cluster)
    }))
    return cluster
}
Example 6: Run
// Run starts all manager sub-systems and the gRPC server at the configured
// address.
// The call never returns unless an error occurs or `Stop()` is called.
//
// TODO(aluzzardi): /!\ This function is *way* too complex. /!\
// It needs to be split into smaller manageable functions.
func (m *Manager) Run(parent context.Context) error {
ctx, ctxCancel := context.WithCancel(parent)
defer ctxCancel()
// Harakiri.
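// The goroutine below cancels our own context as soon as Stop() closes m.stopped.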
go func() {
select {
case <-ctx.Done():
case <-m.stopped:
ctxCancel()
}
}()
leadershipCh, cancel := m.RaftNode.SubscribeLeadership()
defer cancel()
go func() {
for leadershipEvent := range leadershipCh {
// Read out and discard all of the messages once we've stopped. Don't
// acquire the mutex yet: if stopped is closed there is nothing to do, and
// skipping the lock keeps this loop from starving Run()'s attempt to Lock.
select {
case <-m.stopped:
continue
default:
// do nothing, we're not stopped
}
// we're not stopping so NOW acquire the mutex
m.mu.Lock()
newState := leadershipEvent.(raft.LeadershipState)
if newState == raft.IsLeader {
s := m.RaftNode.MemoryStore()
rootCA := m.config.SecurityConfig.RootCA()
nodeID := m.config.SecurityConfig.ClientTLSCreds.NodeID()
raftCfg := raft.DefaultRaftConfig()
raftCfg.ElectionTick = uint32(m.RaftNode.Config.ElectionTick)
raftCfg.HeartbeatTick = uint32(m.RaftNode.Config.HeartbeatTick)
clusterID := m.config.SecurityConfig.ClientTLSCreds.Organization()
initialCAConfig := ca.DefaultCAConfig()
initialCAConfig.ExternalCAs = m.config.ExternalCAs
s.Update(func(tx store.Tx) error {
// Add a default cluster object to the
// store. Don't check the error because
// we expect this to fail unless this
// is a brand new cluster.
store.CreateCluster(tx, &api.Cluster{
ID: clusterID,
Spec: api.ClusterSpec{
Annotations: api.Annotations{
Name: store.DefaultClusterName,
},
Orchestration: api.OrchestrationConfig{
TaskHistoryRetentionLimit: defaultTaskHistoryRetentionLimit,
},
Dispatcher: api.DispatcherConfig{
HeartbeatPeriod: ptypes.DurationProto(dispatcher.DefaultHeartBeatPeriod),
},
Raft: raftCfg,
CAConfig: initialCAConfig,
},
RootCA: api.RootCA{
CAKey: rootCA.Key,
CACert: rootCA.Cert,
CACertHash: rootCA.Digest.String(),
JoinTokens: api.JoinTokens{
Worker: ca.GenerateJoinToken(rootCA),
Manager: ca.GenerateJoinToken(rootCA),
},
},
})
// Add Node entry for ourself, if one
// doesn't exist already.
store.CreateNode(tx, &api.Node{
ID: nodeID,
Certificate: api.Certificate{
CN: nodeID,
Role: api.NodeRoleManager,
Status: api.IssuanceStatus{
State: api.IssuanceStateIssued,
},
},
Spec: api.NodeSpec{
Role: api.NodeRoleManager,
Membership: api.NodeMembershipAccepted,
},
})
return nil
})
// ... (remaining code omitted) ...
Example 7: becomeLeader
// becomeLeader starts the subsystems that are run on the leader.
func (m *Manager) becomeLeader(ctx context.Context) {
s := m.RaftNode.MemoryStore()
rootCA := m.config.SecurityConfig.RootCA()
nodeID := m.config.SecurityConfig.ClientTLSCreds.NodeID()
raftCfg := raft.DefaultRaftConfig()
raftCfg.ElectionTick = uint32(m.RaftNode.Config.ElectionTick)
raftCfg.HeartbeatTick = uint32(m.RaftNode.Config.HeartbeatTick)
clusterID := m.config.SecurityConfig.ClientTLSCreds.Organization()
initialCAConfig := ca.DefaultCAConfig()
initialCAConfig.ExternalCAs = m.config.ExternalCAs
s.Update(func(tx store.Tx) error {
// Add a default cluster object to the
// store. Don't check the error because
// we expect this to fail unless this
// is a brand new cluster.
store.CreateCluster(tx, defaultClusterObject(clusterID, initialCAConfig, raftCfg, rootCA))
// Add Node entry for ourself, if one
// doesn't exist already.
store.CreateNode(tx, managerNode(nodeID))
return nil
})
// Attempt to rotate the key-encrypting-key of the root CA key-material
err := m.rotateRootCAKEK(ctx, clusterID)
if err != nil {
log.G(ctx).WithError(err).Error("root key-encrypting-key rotation failed")
}
m.replicatedOrchestrator = orchestrator.NewReplicatedOrchestrator(s)
m.globalOrchestrator = orchestrator.NewGlobalOrchestrator(s)
m.taskReaper = orchestrator.NewTaskReaper(s)
m.scheduler = scheduler.New(s)
m.keyManager = keymanager.New(s, keymanager.DefaultConfig())
// TODO(stevvooe): Allocate a context that can be used to
// shutdown underlying manager processes when leadership is
// lost.
m.allocator, err = allocator.New(s)
if err != nil {
log.G(ctx).WithError(err).Error("failed to create allocator")
// TODO(stevvooe): It doesn't seem correct here to fail
// creating the allocator but then use it anyway.
}
if m.keyManager != nil {
go func(keyManager *keymanager.KeyManager) {
if err := keyManager.Run(ctx); err != nil {
log.G(ctx).WithError(err).Error("keymanager failed with an error")
}
}(m.keyManager)
}
go func(d *dispatcher.Dispatcher) {
if err := d.Run(ctx); err != nil {
log.G(ctx).WithError(err).Error("Dispatcher exited with an error")
}
}(m.Dispatcher)
go func(server *ca.Server) {
if err := server.Run(ctx); err != nil {
log.G(ctx).WithError(err).Error("CA signer exited with an error")
}
}(m.caserver)
// Start all sub-components in separate goroutines.
// TODO(aluzzardi): This should have some kind of error handling so that
// any component that goes down would bring the entire manager down.
if m.allocator != nil {
go func(allocator *allocator.Allocator) {
if err := allocator.Run(ctx); err != nil {
log.G(ctx).WithError(err).Error("allocator exited with an error")
}
}(m.allocator)
}
go func(scheduler *scheduler.Scheduler) {
if err := scheduler.Run(ctx); err != nil {
log.G(ctx).WithError(err).Error("scheduler exited with an error")
}
}(m.scheduler)
go func(taskReaper *orchestrator.TaskReaper) {
taskReaper.Run()
}(m.taskReaper)
go func(orchestrator *orchestrator.ReplicatedOrchestrator) {
if err := orchestrator.Run(ctx); err != nil {
log.G(ctx).WithError(err).Error("replicated orchestrator exited with an error")
}
}(m.replicatedOrchestrator)
go func(globalOrchestrator *orchestrator.GlobalOrchestrator) {
if err := globalOrchestrator.Run(ctx); err != nil {
// ... (remaining code omitted) ...
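becomeLeader delegates the default cluster and node objects to two helpers, defaultClusterObject and managerNode, which are not shown on this page. A hypothetical reconstruction, based on the inline construction in Example 6 and assuming the surrounding manager package's imports and its defaultTaskHistoryRetentionLimit constant, might look like this; the real swarmkit implementations may differ:

// Hypothetical reconstruction of the helpers used above, inferred from
// the inline construction in Example 6.
func defaultClusterObject(clusterID string, initialCAConfig api.CAConfig, raftCfg api.RaftConfig, rootCA *ca.RootCA) *api.Cluster {
    return &api.Cluster{
        ID: clusterID,
        Spec: api.ClusterSpec{
            Annotations: api.Annotations{
                Name: store.DefaultClusterName,
            },
            Orchestration: api.OrchestrationConfig{
                TaskHistoryRetentionLimit: defaultTaskHistoryRetentionLimit,
            },
            Dispatcher: api.DispatcherConfig{
                HeartbeatPeriod: ptypes.DurationProto(dispatcher.DefaultHeartBeatPeriod),
            },
            Raft:     raftCfg,
            CAConfig: initialCAConfig,
        },
        RootCA: api.RootCA{
            CAKey:      rootCA.Key,
            CACert:     rootCA.Cert,
            CACertHash: rootCA.Digest.String(),
            JoinTokens: api.JoinTokens{
                Worker:  ca.GenerateJoinToken(rootCA),
                Manager: ca.GenerateJoinToken(rootCA),
            },
        },
    }
}

func managerNode(nodeID string) *api.Node {
    return &api.Node{
        ID: nodeID,
        Certificate: api.Certificate{
            CN:   nodeID,
            Role: api.NodeRoleManager,
            Status: api.IssuanceStatus{
                State: api.IssuanceStateIssued,
            },
        },
        Spec: api.NodeSpec{
            Role:       api.NodeRoleManager,
            Membership: api.NodeMembershipAccepted,
        },
    }
}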
Example 8: TestUpdater
func TestUpdater(t *testing.T) {
ctx := context.Background()
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
// Move tasks to their desired state.
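// The goroutine below stands in for an agent: whenever a task's desired
// state changes, it records that desired state as the task's observed state.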
watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
defer cancel()
go func() {
for {
select {
case e := <-watch:
task := e.(state.EventUpdateTask).Task
if task.Status.State == task.DesiredState {
continue
}
err := s.Update(func(tx store.Tx) error {
task = store.GetTask(tx, task.ID)
task.Status.State = task.DesiredState
return store.UpdateTask(tx, task)
})
assert.NoError(t, err)
}
}
}()
instances := 3
cluster := &api.Cluster{
// test cluster configuration propagation to task creation.
Spec: api.ClusterSpec{
Annotations: api.Annotations{
Name: "default",
},
},
}
service := &api.Service{
ID: "id1",
Spec: api.ServiceSpec{
Annotations: api.Annotations{
Name: "name1",
},
Mode: &api.ServiceSpec_Replicated{
Replicated: &api.ReplicatedService{
Replicas: uint64(instances),
},
},
Task: api.TaskSpec{
Runtime: &api.TaskSpec_Container{
Container: &api.ContainerSpec{
Image: "v:1",
// This won't apply in this test because we set the old tasks to DEAD.
StopGracePeriod: ptypes.DurationProto(time.Hour),
},
},
},
},
}
err := s.Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateCluster(tx, cluster))
assert.NoError(t, store.CreateService(tx, service))
for i := 0; i < instances; i++ {
assert.NoError(t, store.CreateTask(tx, newTask(cluster, service, uint64(i))))
}
return nil
})
assert.NoError(t, err)
originalTasks := getRunnableServiceTasks(t, s, service)
for _, task := range originalTasks {
assert.Equal(t, "v:1", task.Spec.GetContainer().Image)
assert.Nil(t, task.LogDriver) // should be left alone
}
service.Spec.Task.GetContainer().Image = "v:2"
service.Spec.Task.LogDriver = &api.Driver{Name: "tasklogdriver"}
updater := NewUpdater(s, NewRestartSupervisor(s))
updater.Run(ctx, cluster, service, getRunnableServiceTasks(t, s, service))
updatedTasks := getRunnableServiceTasks(t, s, service)
for _, task := range updatedTasks {
assert.Equal(t, "v:2", task.Spec.GetContainer().Image)
assert.Equal(t, service.Spec.Task.LogDriver, task.LogDriver) // pick up from task
}
service.Spec.Task.GetContainer().Image = "v:3"
cluster.Spec.DefaultLogDriver = &api.Driver{Name: "clusterlogdriver"} // make cluster default logdriver.
service.Spec.Update = &api.UpdateConfig{
Parallelism: 1,
}
updater = NewUpdater(s, NewRestartSupervisor(s))
updater.Run(ctx, cluster, service, getRunnableServiceTasks(t, s, service))
updatedTasks = getRunnableServiceTasks(t, s, service)
for _, task := range updatedTasks {
assert.Equal(t, "v:3", task.Spec.GetContainer().Image)
assert.Equal(t, service.Spec.Task.LogDriver, task.LogDriver) // still pick up from task
}
service.Spec.Task.GetContainer().Image = "v:4"
service.Spec.Task.LogDriver = nil // use cluster default now.
// ... (remaining code omitted) ...
Example 9: TestTaskHistory
func TestTaskHistory(t *testing.T) {
ctx := context.Background()
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
assert.NoError(t, s.Update(func(tx store.Tx) error {
store.CreateCluster(tx, &api.Cluster{
ID: identity.NewID(),
Spec: api.ClusterSpec{
Annotations: api.Annotations{
Name: store.DefaultClusterName,
},
Orchestration: api.OrchestrationConfig{
TaskHistoryRetentionLimit: 2,
},
},
})
return nil
}))
taskReaper := NewTaskReaper(s)
defer taskReaper.Stop()
orchestrator := NewReplicatedOrchestrator(s)
defer orchestrator.Stop()
watch, cancel := state.Watch(s.WatchQueue() /*state.EventCreateTask{}, state.EventUpdateTask{}*/)
defer cancel()
// Create a service with two instances specified before the orchestrator is
// started. This should result in two tasks when the orchestrator
// starts up.
err := s.Update(func(tx store.Tx) error {
j1 := &api.Service{
ID: "id1",
Spec: api.ServiceSpec{
Annotations: api.Annotations{
Name: "name1",
},
Mode: &api.ServiceSpec_Replicated{
Replicated: &api.ReplicatedService{
Replicas: 2,
},
},
Task: api.TaskSpec{
Restart: &api.RestartPolicy{
Condition: api.RestartOnAny,
Delay: ptypes.DurationProto(0),
},
},
},
}
assert.NoError(t, store.CreateService(tx, j1))
return nil
})
assert.NoError(t, err)
// Start the orchestrator.
go func() {
assert.NoError(t, orchestrator.Run(ctx))
}()
go taskReaper.Run()
observedTask1 := watchTaskCreate(t, watch)
assert.Equal(t, observedTask1.Status.State, api.TaskStateNew)
assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1")
observedTask2 := watchTaskCreate(t, watch)
assert.Equal(t, observedTask2.Status.State, api.TaskStateNew)
assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1")
// Fail both tasks. They should both get restarted.
updatedTask1 := observedTask1.Copy()
updatedTask1.Status.State = api.TaskStateFailed
updatedTask1.ServiceAnnotations = api.Annotations{Name: "original"}
updatedTask2 := observedTask2.Copy()
updatedTask2.Status.State = api.TaskStateFailed
updatedTask2.ServiceAnnotations = api.Annotations{Name: "original"}
err = s.Update(func(tx store.Tx) error {
assert.NoError(t, store.UpdateTask(tx, updatedTask1))
assert.NoError(t, store.UpdateTask(tx, updatedTask2))
return nil
})
expectCommit(t, watch)
expectTaskUpdate(t, watch)
expectTaskUpdate(t, watch)
expectCommit(t, watch)
expectTaskUpdate(t, watch)
observedTask3 := watchTaskCreate(t, watch)
assert.Equal(t, observedTask3.Status.State, api.TaskStateNew)
assert.Equal(t, observedTask3.ServiceAnnotations.Name, "name1")
expectTaskUpdate(t, watch)
observedTask4 := watchTaskCreate(t, watch)
assert.Equal(t, observedTask4.Status.State, api.TaskStateNew)
assert.Equal(t, observedTask4.ServiceAnnotations.Name, "name1")
// Fail these replacement tasks. Since TaskHistory is set to 2, this
// should cause the oldest tasks for each instance to get deleted.
// ... (remaining code omitted) ...
Example 10: TestRemoveNodes
func TestRemoveNodes(t *testing.T) {
ts := newTestServer(t)
defer ts.Stop()
ts.Store.Update(func(tx store.Tx) error {
store.CreateCluster(tx, &api.Cluster{
ID: identity.NewID(),
Spec: api.ClusterSpec{
Annotations: api.Annotations{
Name: store.DefaultClusterName,
},
},
})
return nil
})
r, err := ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{})
assert.NoError(t, err)
assert.Empty(t, r.Nodes)
createNode(t, ts, "id1", api.NodeRoleManager, api.NodeMembershipAccepted, api.NodeStatus_READY)
r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{})
assert.NoError(t, err)
assert.Len(t, r.Nodes, 1)
createNode(t, ts, "id2", api.NodeRoleWorker, api.NodeMembershipAccepted, api.NodeStatus_READY)
createNode(t, ts, "id3", api.NodeRoleWorker, api.NodeMembershipPending, api.NodeStatus_UNKNOWN)
r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{})
assert.NoError(t, err)
assert.Len(t, r.Nodes, 3)
// Attempt to remove a ready node without force
_, err = ts.Client.RemoveNode(context.Background(),
&api.RemoveNodeRequest{
NodeID: "id2",
Force: false,
},
)
assert.Error(t, err)
r, err = ts.Client.ListNodes(context.Background(),
&api.ListNodesRequest{
Filters: &api.ListNodesRequest_Filters{
Roles: []api.NodeRole{api.NodeRoleManager, api.NodeRoleWorker},
},
},
)
assert.NoError(t, err)
assert.Len(t, r.Nodes, 3)
// Attempt to remove a ready node with force
_, err = ts.Client.RemoveNode(context.Background(),
&api.RemoveNodeRequest{
NodeID: "id2",
Force: true,
},
)
assert.NoError(t, err)
r, err = ts.Client.ListNodes(context.Background(),
&api.ListNodesRequest{
Filters: &api.ListNodesRequest_Filters{
Roles: []api.NodeRole{api.NodeRoleManager, api.NodeRoleWorker},
},
},
)
assert.NoError(t, err)
assert.Len(t, r.Nodes, 2)
clusterResp, err := ts.Client.ListClusters(context.Background(), &api.ListClustersRequest{})
assert.NoError(t, err)
require.Len(t, clusterResp.Clusters, 1)
require.Len(t, clusterResp.Clusters[0].BlacklistedCertificates, 1)
_, ok := clusterResp.Clusters[0].BlacklistedCertificates["id2"]
assert.True(t, ok)
// Attempt to remove a non-ready node without force
_, err = ts.Client.RemoveNode(context.Background(),
&api.RemoveNodeRequest{
NodeID: "id3",
Force: false,
},
)
assert.NoError(t, err)
r, err = ts.Client.ListNodes(context.Background(),
&api.ListNodesRequest{
Filters: &api.ListNodesRequest_Filters{
Roles: []api.NodeRole{api.NodeRoleManager, api.NodeRoleWorker},
},
},
)
assert.NoError(t, err)
assert.Len(t, r.Nodes, 1)
}
Example 11: TestUpdateClusterRotateUnlockKey
func TestUpdateClusterRotateUnlockKey(t *testing.T) {
ts := newTestServer(t)
defer ts.Stop()
// create a cluster with extra encryption keys, to make sure they exist
cluster := createClusterObj("id", "name", api.AcceptancePolicy{}, ts.Server.rootCA)
expected := make(map[string]*api.EncryptionKey)
for i := 1; i <= 2; i++ {
value := fmt.Sprintf("fake%d", i)
expected[value] = &api.EncryptionKey{Subsystem: value, Key: []byte(value)}
cluster.UnlockKeys = append(cluster.UnlockKeys, expected[value])
}
require.NoError(t, ts.Store.Update(func(tx store.Tx) error {
return store.CreateCluster(tx, cluster)
}))
// we have to get the key from the memory store, since the cluster returned by the API is redacted
getManagerKey := func() (managerKey *api.EncryptionKey) {
ts.Store.View(func(tx store.ReadTx) {
viewCluster := store.GetCluster(tx, cluster.ID)
// no matter whether there's a manager key or not, the other keys should not have been affected
foundKeys := make(map[string]*api.EncryptionKey)
for _, eKey := range viewCluster.UnlockKeys {
foundKeys[eKey.Subsystem] = eKey
}
for v, key := range expected {
foundKey, ok := foundKeys[v]
require.True(t, ok)
require.Equal(t, key, foundKey)
}
managerKey = foundKeys[ca.ManagerRole]
})
return
}
validateListResult := func(expectedLocked bool) api.Version {
r, err := ts.Client.ListClusters(context.Background(), &api.ListClustersRequest{
Filters: &api.ListClustersRequest_Filters{
NamePrefixes: []string{"name"},
},
})
require.NoError(t, err)
require.Len(t, r.Clusters, 1)
require.Equal(t, expectedLocked, r.Clusters[0].Spec.EncryptionConfig.AutoLockManagers)
require.Nil(t, r.Clusters[0].UnlockKeys) // redacted
return r.Clusters[0].Meta.Version
}
// we start off with manager autolocking turned off
version := validateListResult(false)
require.Nil(t, getManagerKey())
// Rotate unlock key without turning auto-lock on - key should still be nil
_, err := ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{
ClusterID: cluster.ID,
Spec: &cluster.Spec,
ClusterVersion: &version,
Rotation: api.KeyRotation{
ManagerUnlockKey: true,
},
})
require.NoError(t, err)
version = validateListResult(false)
require.Nil(t, getManagerKey())
// Enable auto-lock only, no rotation boolean
spec := cluster.Spec.Copy()
spec.EncryptionConfig.AutoLockManagers = true
_, err = ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{
ClusterID: cluster.ID,
Spec: spec,
ClusterVersion: &version,
})
require.NoError(t, err)
version = validateListResult(true)
managerKey := getManagerKey()
require.NotNil(t, managerKey)
// Rotate the manager key
_, err = ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{
ClusterID: cluster.ID,
Spec: spec,
ClusterVersion: &version,
Rotation: api.KeyRotation{
ManagerUnlockKey: true,
},
})
require.NoError(t, err)
version = validateListResult(true)
newManagerKey := getManagerKey()
require.NotNil(t, managerKey)
require.NotEqual(t, managerKey, newManagerKey)
managerKey = newManagerKey
// Just update the cluster without modifying unlock keys
_, err = ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{
ClusterID: cluster.ID,
Spec: spec,
ClusterVersion: &version,
// ... (remaining code omitted) ...
Example 12: TestUpdater
func TestUpdater(t *testing.T) {
ctx := context.Background()
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
defer s.Close()
// Move tasks to their desired state.
watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
defer cancel()
go func() {
for {
select {
case e := <-watch:
task := e.(state.EventUpdateTask).Task
if task.Status.State == task.DesiredState {
continue
}
err := s.Update(func(tx store.Tx) error {
task = store.GetTask(tx, task.ID)
task.Status.State = task.DesiredState
return store.UpdateTask(tx, task)
})
assert.NoError(t, err)
}
}
}()
instances := 3
cluster := &api.Cluster{
// test cluster configuration propagation to task creation.
Spec: api.ClusterSpec{
Annotations: api.Annotations{
Name: "default",
},
},
}
service := &api.Service{
ID: "id1",
Spec: api.ServiceSpec{
Annotations: api.Annotations{
Name: "name1",
},
Mode: &api.ServiceSpec_Replicated{
Replicated: &api.ReplicatedService{
Replicas: uint64(instances),
},
},
Task: api.TaskSpec{
Runtime: &api.TaskSpec_Container{
Container: &api.ContainerSpec{
Image: "v:1",
},
},
},
Update: &api.UpdateConfig{
// avoid having Run block for a long time to watch for failures
Monitor: gogotypes.DurationProto(50 * time.Millisecond),
},
},
}
err := s.Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateCluster(tx, cluster))
assert.NoError(t, store.CreateService(tx, service))
for i := 0; i < instances; i++ {
assert.NoError(t, store.CreateTask(tx, orchestrator.NewTask(cluster, service, uint64(i), "")))
}
return nil
})
assert.NoError(t, err)
originalTasks := getRunnableSlotSlice(t, s, service)
for _, slot := range originalTasks {
for _, task := range slot {
assert.Equal(t, "v:1", task.Spec.GetContainer().Image)
assert.Nil(t, task.LogDriver) // should be left alone
}
}
service.Spec.Task.GetContainer().Image = "v:2"
service.Spec.Task.LogDriver = &api.Driver{Name: "tasklogdriver"}
updater := NewUpdater(s, restart.NewSupervisor(s), cluster, service)
updater.Run(ctx, getRunnableSlotSlice(t, s, service))
updatedTasks := getRunnableSlotSlice(t, s, service)
for _, slot := range updatedTasks {
for _, task := range slot {
assert.Equal(t, "v:2", task.Spec.GetContainer().Image)
assert.Equal(t, service.Spec.Task.LogDriver, task.LogDriver) // pick up from task
}
}
service.Spec.Task.GetContainer().Image = "v:3"
cluster.Spec.TaskDefaults.LogDriver = &api.Driver{Name: "clusterlogdriver"} // make cluster default logdriver.
service.Spec.Update = &api.UpdateConfig{
Parallelism: 1,
Monitor: gogotypes.DurationProto(50 * time.Millisecond),
}
updater = NewUpdater(s, restart.NewSupervisor(s), cluster, service)
updater.Run(ctx, getRunnableSlotSlice(t, s, service))
// ... (remaining code omitted) ...