This article collects typical usages of the Go function DurationProto from github.com/docker/swarmkit/protobuf/ptypes. If you are wondering what DurationProto does, how to call it, or what real-world usage looks like, the curated code examples below should help.
15 code examples of DurationProto are shown, sorted by popularity by default.
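Before the examples, a quick orientation: DurationProto wraps a Go time.Duration in the protobuf Duration message that swarmkit stores in its API objects, and the same package exposes a Duration helper for the reverse conversion. The round trip below is a minimal sketch, assuming the package mirrors the standard golang/protobuf ptypes helpers; it is not taken from any of the examples that follow.

package main

import (
    "fmt"
    "time"

    "github.com/docker/swarmkit/protobuf/ptypes"
)

func main() {
    // time.Duration -> protobuf Duration message, as stored in swarmkit specs.
    d := ptypes.DurationProto(90 * time.Second)

    // protobuf Duration message -> time.Duration, when reading a spec back.
    roundTripped, err := ptypes.Duration(d)
    if err != nil {
        fmt.Println("conversion failed:", err)
        return
    }
    fmt.Println(roundTripped) // 1m30s
}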
Example 1: TestHealthcheck
func TestHealthcheck(t *testing.T) {
    c := containerConfig{
        task: &api.Task{
            Spec: api.TaskSpec{Runtime: &api.TaskSpec_Container{
                Container: &api.ContainerSpec{
                    Healthcheck: &api.HealthConfig{
                        Test:     []string{"a", "b", "c"},
                        Interval: ptypes.DurationProto(time.Second),
                        Timeout:  ptypes.DurationProto(time.Minute),
                        Retries:  10,
                    },
                },
            }},
        },
    }
    config := c.config()
    expected := &enginecontainer.HealthConfig{
        Test:     []string{"a", "b", "c"},
        Interval: time.Second,
        Timeout:  time.Minute,
        Retries:  10,
    }
    if !reflect.DeepEqual(config.Healthcheck, expected) {
        t.Fatalf("expected %#v, got %#v", expected, config.Healthcheck)
    }
}
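Example 1 also exercises the reverse direction: c.config() is expected to turn the proto durations back into plain time.Duration values in the engine's HealthConfig. The helper below is a hedged sketch of that direction; healthConfigFromGRPC is a name invented for illustration, not the actual moby implementation, and it ignores conversion errors for brevity.

// healthConfigFromGRPC is a hypothetical helper sketching the proto -> engine
// conversion that the test above asserts indirectly through c.config().
func healthConfigFromGRPC(h *api.HealthConfig) *enginecontainer.HealthConfig {
    interval, _ := ptypes.Duration(h.Interval) // errors ignored in this sketch
    timeout, _ := ptypes.Duration(h.Timeout)
    return &enginecontainer.HealthConfig{
        Test:     h.Test,
        Interval: interval,
        Timeout:  timeout,
        Retries:  int(h.Retries),
    }
}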
Example 2: restartPolicyToGRPC
func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error) {
    var rp *swarmapi.RestartPolicy
    if p != nil {
        rp = &swarmapi.RestartPolicy{}
        switch p.Condition {
        case types.RestartPolicyConditionNone:
            rp.Condition = swarmapi.RestartOnNone
        case types.RestartPolicyConditionOnFailure:
            rp.Condition = swarmapi.RestartOnFailure
        case types.RestartPolicyConditionAny:
            rp.Condition = swarmapi.RestartOnAny
        default:
            if string(p.Condition) != "" {
                return nil, fmt.Errorf("invalid RestartCondition: %q", p.Condition)
            }
            rp.Condition = swarmapi.RestartOnAny
        }
        if p.Delay != nil {
            rp.Delay = ptypes.DurationProto(*p.Delay)
        }
        if p.Window != nil {
            rp.Window = ptypes.DurationProto(*p.Window)
        }
        if p.MaxAttempts != nil {
            rp.MaxAttempts = *p.MaxAttempts
        }
    }
    return rp, nil
}
Example 3: restartPolicyToGRPC
func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error) {
    var rp *swarmapi.RestartPolicy
    if p != nil {
        rp = &swarmapi.RestartPolicy{}
        sanitizedCondition := strings.ToUpper(strings.Replace(string(p.Condition), "-", "_", -1))
        if condition, ok := swarmapi.RestartPolicy_RestartCondition_value[sanitizedCondition]; ok {
            rp.Condition = swarmapi.RestartPolicy_RestartCondition(condition)
        } else if string(p.Condition) == "" {
            rp.Condition = swarmapi.RestartOnAny
        } else {
            return nil, fmt.Errorf("invalid RestartCondition: %q", p.Condition)
        }
        if p.Delay != nil {
            rp.Delay = ptypes.DurationProto(*p.Delay)
        }
        if p.Window != nil {
            rp.Window = ptypes.DurationProto(*p.Window)
        }
        if p.MaxAttempts != nil {
            rp.MaxAttempts = *p.MaxAttempts
        }
    }
    return rp, nil
}
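Both variants above take the optional delay and window as *time.Duration pointers and only call DurationProto when the caller actually set them. The call-site sketch below is illustrative only; the policy values are made up, and it assumes the docker/api types shown in the examples.

// Hypothetical call site for the converters above.
func exampleRestartPolicyConversion() error {
    delay := 5 * time.Second
    window := time.Minute
    rp, err := restartPolicyToGRPC(&types.RestartPolicy{
        Condition: types.RestartPolicyConditionOnFailure,
        Delay:     &delay,
        Window:    &window,
    })
    if err != nil {
        return err // an unrecognized condition string ends up here
    }
    // rp.Delay and rp.Window are now protobuf Duration messages.
    _ = rp
    return nil
}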
Example 4: SwarmSpecToGRPCandMerge
// SwarmSpecToGRPCandMerge converts a Spec to a grpc ClusterSpec and merges the AcceptancePolicy from an existing grpc ClusterSpec if provided.
func SwarmSpecToGRPCandMerge(s types.Spec, existingSpec *swarmapi.ClusterSpec) (swarmapi.ClusterSpec, error) {
    spec := swarmapi.ClusterSpec{
        Annotations: swarmapi.Annotations{
            Name:   s.Name,
            Labels: s.Labels,
        },
        Orchestration: swarmapi.OrchestrationConfig{
            TaskHistoryRetentionLimit: s.Orchestration.TaskHistoryRetentionLimit,
        },
        Raft: swarmapi.RaftConfig{
            SnapshotInterval:           s.Raft.SnapshotInterval,
            KeepOldSnapshots:           s.Raft.KeepOldSnapshots,
            LogEntriesForSlowFollowers: s.Raft.LogEntriesForSlowFollowers,
            HeartbeatTick:              s.Raft.HeartbeatTick,
            ElectionTick:               s.Raft.ElectionTick,
        },
        Dispatcher: swarmapi.DispatcherConfig{
            HeartbeatPeriod: ptypes.DurationProto(time.Duration(s.Dispatcher.HeartbeatPeriod)),
        },
        CAConfig: swarmapi.CAConfig{
            NodeCertExpiry: ptypes.DurationProto(s.CAConfig.NodeCertExpiry),
        },
    }
    if err := SwarmSpecUpdateAcceptancePolicy(&spec, s.AcceptancePolicy, existingSpec); err != nil {
        return swarmapi.ClusterSpec{}, err
    }
    return spec, nil
}
Example 5: MergeSwarmSpecToGRPC
// MergeSwarmSpecToGRPC merges a Spec with an initial grpc ClusterSpec
func MergeSwarmSpecToGRPC(s types.Spec, spec swarmapi.ClusterSpec) (swarmapi.ClusterSpec, error) {
    // We take the initSpec (either created from scratch, or returned by swarmkit),
    // and will only change the value if the one taken from types.Spec is not nil or 0.
    // In other words, if the value taken from types.Spec is nil or 0, we will maintain the status quo.
    if s.Annotations.Name != "" {
        spec.Annotations.Name = s.Annotations.Name
    }
    if len(s.Annotations.Labels) != 0 {
        spec.Annotations.Labels = s.Annotations.Labels
    }
    if s.Orchestration.TaskHistoryRetentionLimit != nil {
        spec.Orchestration.TaskHistoryRetentionLimit = *s.Orchestration.TaskHistoryRetentionLimit
    }
    if s.Raft.SnapshotInterval != 0 {
        spec.Raft.SnapshotInterval = s.Raft.SnapshotInterval
    }
    if s.Raft.KeepOldSnapshots != nil {
        spec.Raft.KeepOldSnapshots = *s.Raft.KeepOldSnapshots
    }
    if s.Raft.LogEntriesForSlowFollowers != 0 {
        spec.Raft.LogEntriesForSlowFollowers = s.Raft.LogEntriesForSlowFollowers
    }
    if s.Raft.HeartbeatTick != 0 {
        spec.Raft.HeartbeatTick = uint32(s.Raft.HeartbeatTick)
    }
    if s.Raft.ElectionTick != 0 {
        spec.Raft.ElectionTick = uint32(s.Raft.ElectionTick)
    }
    if s.Dispatcher.HeartbeatPeriod != 0 {
        spec.Dispatcher.HeartbeatPeriod = ptypes.DurationProto(time.Duration(s.Dispatcher.HeartbeatPeriod))
    }
    if s.CAConfig.NodeCertExpiry != 0 {
        spec.CAConfig.NodeCertExpiry = ptypes.DurationProto(s.CAConfig.NodeCertExpiry)
    }
    for _, ca := range s.CAConfig.ExternalCAs {
        protocol, ok := swarmapi.ExternalCA_CAProtocol_value[strings.ToUpper(string(ca.Protocol))]
        if !ok {
            return swarmapi.ClusterSpec{}, fmt.Errorf("invalid protocol: %q", ca.Protocol)
        }
        spec.CAConfig.ExternalCAs = append(spec.CAConfig.ExternalCAs, &swarmapi.ExternalCA{
            Protocol: swarmapi.ExternalCA_CAProtocol(protocol),
            URL:      ca.URL,
            Options:  ca.Options,
        })
    }
    spec.EncryptionConfig.AutoLockManagers = s.EncryptionConfig.AutoLockManagers
    return spec, nil
}
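MergeSwarmSpecToGRPC only overwrites fields whose incoming value is non-nil or non-zero, which is what keeps an existing HeartbeatPeriod or NodeCertExpiry proto intact when the caller leaves those fields at 0. The sketch below shows a hypothetical update path built on that behaviour; updatedClusterSpec and its signature are invented for illustration.

// updatedClusterSpec is a hypothetical wrapper: start from the ClusterSpec
// currently stored by swarmkit, overlay the user-supplied Spec, and return
// the merged result for the update call.
func updatedClusterSpec(current swarmapi.ClusterSpec, requested types.Spec) (swarmapi.ClusterSpec, error) {
    merged, err := MergeSwarmSpecToGRPC(requested, current)
    if err != nil {
        return swarmapi.ClusterSpec{}, err
    }
    // If requested.CAConfig.NodeCertExpiry was 0, merged still carries the
    // DurationProto value that was already present in `current`.
    return merged, nil
}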
Example 6: defaultClusterObject
// defaultClusterObject creates a default cluster.
func defaultClusterObject(clusterID string, initialCAConfig api.CAConfig, raftCfg api.RaftConfig, rootCA *ca.RootCA) *api.Cluster {
    return &api.Cluster{
        ID: clusterID,
        Spec: api.ClusterSpec{
            Annotations: api.Annotations{
                Name: store.DefaultClusterName,
            },
            Orchestration: api.OrchestrationConfig{
                TaskHistoryRetentionLimit: defaultTaskHistoryRetentionLimit,
            },
            Dispatcher: api.DispatcherConfig{
                HeartbeatPeriod: ptypes.DurationProto(dispatcher.DefaultHeartBeatPeriod),
            },
            Raft:     raftCfg,
            CAConfig: initialCAConfig,
        },
        RootCA: api.RootCA{
            CAKey:      rootCA.Key,
            CACert:     rootCA.Cert,
            CACertHash: rootCA.Digest.String(),
            JoinTokens: api.JoinTokens{
                Worker:  ca.GenerateJoinToken(rootCA),
                Manager: ca.GenerateJoinToken(rootCA),
            },
        },
    }
}
Example 7: TestValidateClusterSpec
func TestValidateClusterSpec(t *testing.T) {
    type BadClusterSpec struct {
        spec *api.ClusterSpec
        c    codes.Code
    }
    for _, bad := range []BadClusterSpec{
        {
            spec: nil,
            c:    codes.InvalidArgument,
        },
        {
            spec: &api.ClusterSpec{
                Annotations: api.Annotations{
                    Name: "name",
                },
                CAConfig: api.CAConfig{
                    NodeCertExpiry: ptypes.DurationProto(29 * time.Minute),
                },
            },
            c: codes.InvalidArgument,
        },
        {
            spec: &api.ClusterSpec{
                Annotations: api.Annotations{
                    Name: "name",
                },
                Dispatcher: api.DispatcherConfig{
                    HeartbeatPeriod: ptypes.DurationProto(-29 * time.Minute),
                },
            },
            c: codes.InvalidArgument,
        },
    } {
        err := validateClusterSpec(bad.spec)
        assert.Error(t, err)
        assert.Equal(t, bad.c, grpc.Code(err))
    }
    for _, good := range []*api.ClusterSpec{
        createClusterSpec("name"),
    } {
        err := validateClusterSpec(good)
        assert.NoError(t, err)
    }
}
Example 8: containerToGRPC
func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) {
    containerSpec := &swarmapi.ContainerSpec{
        Image:   c.Image,
        Labels:  c.Labels,
        Command: c.Command,
        Args:    c.Args,
        Env:     c.Env,
        Dir:     c.Dir,
        User:    c.User,
        Groups:  c.Groups,
    }
    if c.StopGracePeriod != nil {
        containerSpec.StopGracePeriod = ptypes.DurationProto(*c.StopGracePeriod)
    }
    // Mounts
    for _, m := range c.Mounts {
        mount := swarmapi.Mount{
            Target:   m.Target,
            Source:   m.Source,
            ReadOnly: m.ReadOnly,
        }
        if mountType, ok := swarmapi.Mount_MountType_value[strings.ToUpper(string(m.Type))]; ok {
            mount.Type = swarmapi.Mount_MountType(mountType)
        } else if string(m.Type) != "" {
            return nil, fmt.Errorf("invalid MountType: %q", m.Type)
        }
        if m.BindOptions != nil {
            if mountPropagation, ok := swarmapi.Mount_BindOptions_MountPropagation_value[strings.ToUpper(string(m.BindOptions.Propagation))]; ok {
                mount.BindOptions = &swarmapi.Mount_BindOptions{Propagation: swarmapi.Mount_BindOptions_MountPropagation(mountPropagation)}
            } else if string(m.BindOptions.Propagation) != "" {
                return nil, fmt.Errorf("invalid MountPropagation: %q", m.BindOptions.Propagation)
            }
        }
        if m.VolumeOptions != nil {
            mount.VolumeOptions = &swarmapi.Mount_VolumeOptions{
                NoCopy: m.VolumeOptions.NoCopy,
                Labels: m.VolumeOptions.Labels,
            }
            if m.VolumeOptions.DriverConfig != nil {
                mount.VolumeOptions.DriverConfig = &swarmapi.Driver{
                    Name:    m.VolumeOptions.DriverConfig.Name,
                    Options: m.VolumeOptions.DriverConfig.Options,
                }
            }
        }
        containerSpec.Mounts = append(containerSpec.Mounts, mount)
    }
    return containerSpec, nil
}
Example 9: createClusterSpec
func createClusterSpec(name string) *api.ClusterSpec {
    return &api.ClusterSpec{
        Annotations: api.Annotations{
            Name: name,
        },
        CAConfig: api.CAConfig{
            NodeCertExpiry: ptypes.DurationProto(ca.DefaultNodeCertExpiration),
        },
    }
}
Example 10: SwarmSpecToGRPCandMerge
// SwarmSpecToGRPCandMerge converts a Spec to a grpc ClusterSpec and merges the AcceptancePolicy from an existing grpc ClusterSpec if provided.
func SwarmSpecToGRPCandMerge(s types.Spec, existingSpec *swarmapi.ClusterSpec) (swarmapi.ClusterSpec, error) {
    spec := swarmapi.ClusterSpec{
        Annotations: swarmapi.Annotations{
            Name:   s.Name,
            Labels: s.Labels,
        },
        Orchestration: swarmapi.OrchestrationConfig{
            TaskHistoryRetentionLimit: s.Orchestration.TaskHistoryRetentionLimit,
        },
        Raft: swarmapi.RaftConfig{
            SnapshotInterval:           s.Raft.SnapshotInterval,
            KeepOldSnapshots:           s.Raft.KeepOldSnapshots,
            LogEntriesForSlowFollowers: s.Raft.LogEntriesForSlowFollowers,
            HeartbeatTick:              s.Raft.HeartbeatTick,
            ElectionTick:               s.Raft.ElectionTick,
        },
        Dispatcher: swarmapi.DispatcherConfig{
            HeartbeatPeriod: ptypes.DurationProto(time.Duration(s.Dispatcher.HeartbeatPeriod)),
        },
        CAConfig: swarmapi.CAConfig{
            NodeCertExpiry: ptypes.DurationProto(s.CAConfig.NodeCertExpiry),
        },
    }
    for _, ca := range s.CAConfig.ExternalCAs {
        protocol, ok := swarmapi.ExternalCA_CAProtocol_value[strings.ToUpper(string(ca.Protocol))]
        if !ok {
            return swarmapi.ClusterSpec{}, fmt.Errorf("invalid protocol: %q", ca.Protocol)
        }
        spec.CAConfig.ExternalCAs = append(spec.CAConfig.ExternalCAs, &swarmapi.ExternalCA{
            Protocol: swarmapi.ExternalCA_CAProtocol(protocol),
            URL:      ca.URL,
            Options:  ca.Options,
        })
    }
    if err := SwarmSpecUpdateAcceptancePolicy(&spec, s.AcceptancePolicy, existingSpec); err != nil {
        return swarmapi.ClusterSpec{}, err
    }
    return spec, nil
}
Example 11: TestValidateRestartPolicy
func TestValidateRestartPolicy(t *testing.T) {
    bad := []*api.RestartPolicy{
        {
            Delay:  ptypes.DurationProto(time.Duration(-1 * time.Second)),
            Window: ptypes.DurationProto(time.Duration(-1 * time.Second)),
        },
        {
            Delay:  ptypes.DurationProto(time.Duration(20 * time.Second)),
            Window: ptypes.DurationProto(time.Duration(-4 * time.Second)),
        },
    }
    good := []*api.RestartPolicy{
        {
            Delay:  ptypes.DurationProto(time.Duration(10 * time.Second)),
            Window: ptypes.DurationProto(time.Duration(1 * time.Second)),
        },
    }
    for _, b := range bad {
        err := validateRestartPolicy(b)
        assert.Error(t, err)
        assert.Equal(t, codes.InvalidArgument, grpc.Code(err))
    }
    for _, g := range good {
        assert.NoError(t, validateRestartPolicy(g))
    }
}
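The test expects any negative Delay or Window to be rejected with codes.InvalidArgument. The validator below is a hedged re-implementation for illustration only, assuming validation converts each proto back with ptypes.Duration and checks the sign; the real validateRestartPolicy may differ in details and error text.

// validateRestartPolicySketch is an illustrative stand-in for validateRestartPolicy.
func validateRestartPolicySketch(rp *api.RestartPolicy) error {
    if rp == nil {
        return nil
    }
    if rp.Delay != nil {
        delay, err := ptypes.Duration(rp.Delay)
        if err != nil {
            return err
        }
        if delay < 0 {
            return grpc.Errorf(codes.InvalidArgument, "restart delay cannot be negative")
        }
    }
    if rp.Window != nil {
        window, err := ptypes.Duration(rp.Window)
        if err != nil {
            return err
        }
        if window < 0 {
            return grpc.Errorf(codes.InvalidArgument, "restart window cannot be negative")
        }
    }
    return nil
}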
Example 12: Run
// Run starts all manager sub-systems and the gRPC server at the configured
// address.
// The call never returns unless an error occurs or `Stop()` is called.
//
// TODO(aluzzardi): /!\ This function is *way* too complex. /!\
// It needs to be split into smaller manageable functions.
func (m *Manager) Run(parent context.Context) error {
    ctx, ctxCancel := context.WithCancel(parent)
    defer ctxCancel()
    // Harakiri.
    go func() {
        select {
        case <-ctx.Done():
        case <-m.stopped:
            ctxCancel()
        }
    }()
    leadershipCh, cancel := m.RaftNode.SubscribeLeadership()
    defer cancel()
    go func() {
        for leadershipEvent := range leadershipCh {
            // read out and discard all of the messages when we've stopped;
            // don't acquire the mutex yet. if stopped is closed, we don't need it.
            // this stops this loop from starving Run()'s attempt to Lock.
            select {
            case <-m.stopped:
                continue
            default:
                // do nothing, we're not stopped
            }
            // we're not stopping so NOW acquire the mutex
            m.mu.Lock()
            newState := leadershipEvent.(raft.LeadershipState)
            if newState == raft.IsLeader {
                s := m.RaftNode.MemoryStore()
                rootCA := m.config.SecurityConfig.RootCA()
                nodeID := m.config.SecurityConfig.ClientTLSCreds.NodeID()
                raftCfg := raft.DefaultRaftConfig()
                raftCfg.ElectionTick = uint32(m.RaftNode.Config.ElectionTick)
                raftCfg.HeartbeatTick = uint32(m.RaftNode.Config.HeartbeatTick)
                clusterID := m.config.SecurityConfig.ClientTLSCreds.Organization()
                initialCAConfig := ca.DefaultCAConfig()
                initialCAConfig.ExternalCAs = m.config.ExternalCAs
                s.Update(func(tx store.Tx) error {
                    // Add a default cluster object to the
                    // store. Don't check the error because
                    // we expect this to fail unless this
                    // is a brand new cluster.
                    store.CreateCluster(tx, &api.Cluster{
                        ID: clusterID,
                        Spec: api.ClusterSpec{
                            Annotations: api.Annotations{
                                Name: store.DefaultClusterName,
                            },
                            Orchestration: api.OrchestrationConfig{
                                TaskHistoryRetentionLimit: defaultTaskHistoryRetentionLimit,
                            },
                            Dispatcher: api.DispatcherConfig{
                                HeartbeatPeriod: ptypes.DurationProto(dispatcher.DefaultHeartBeatPeriod),
                            },
                            Raft:     raftCfg,
                            CAConfig: initialCAConfig,
                        },
                        RootCA: api.RootCA{
                            CAKey:      rootCA.Key,
                            CACert:     rootCA.Cert,
                            CACertHash: rootCA.Digest.String(),
                            JoinTokens: api.JoinTokens{
                                Worker:  ca.GenerateJoinToken(rootCA),
                                Manager: ca.GenerateJoinToken(rootCA),
                            },
                        },
                    })
                    // Add Node entry for ourself, if one
                    // doesn't exist already.
                    store.CreateNode(tx, &api.Node{
                        ID: nodeID,
                        Certificate: api.Certificate{
                            CN:   nodeID,
                            Role: api.NodeRoleManager,
                            Status: api.IssuanceStatus{
                                State: api.IssuanceStateIssued,
                            },
                        },
                        Spec: api.NodeSpec{
                            Role:       api.NodeRoleManager,
                            Membership: api.NodeMembershipAccepted,
                        },
                    })
                    return nil
                })
//......... some code omitted here .........
Example 13: TestUpdaterRollback
func TestUpdaterRollback(t *testing.T) {
    ctx := context.Background()
    s := store.NewMemoryStore(nil)
    assert.NotNil(t, s)
    defer s.Close()
    orchestrator := NewReplicatedOrchestrator(s)
    defer orchestrator.Stop()
    var (
        failImage1 uint32
        failImage2 uint32
    )
    watchCreate, cancelCreate := state.Watch(s.WatchQueue(), state.EventCreateTask{})
    defer cancelCreate()
    watchServiceUpdate, cancelServiceUpdate := state.Watch(s.WatchQueue(), state.EventUpdateService{})
    defer cancelServiceUpdate()
    // Fail new tasks the updater tries to run
    watchUpdate, cancelUpdate := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
    defer cancelUpdate()
    go func() {
        failedLast := false
        for {
            select {
            case e := <-watchUpdate:
                task := e.(state.EventUpdateTask).Task
                if task.DesiredState == task.Status.State {
                    continue
                }
                if task.DesiredState == api.TaskStateRunning && task.Status.State != api.TaskStateFailed && task.Status.State != api.TaskStateRunning {
                    err := s.Update(func(tx store.Tx) error {
                        task = store.GetTask(tx, task.ID)
                        // Never fail two image2 tasks in a row, so there's a mix of
                        // failed and successful tasks for the rollback.
                        if task.Spec.GetContainer().Image == "image1" && atomic.LoadUint32(&failImage1) == 1 {
                            task.Status.State = api.TaskStateFailed
                            failedLast = true
                        } else if task.Spec.GetContainer().Image == "image2" && atomic.LoadUint32(&failImage2) == 1 && !failedLast {
                            task.Status.State = api.TaskStateFailed
                            failedLast = true
                        } else {
                            task.Status.State = task.DesiredState
                            failedLast = false
                        }
                        return store.UpdateTask(tx, task)
                    })
                    assert.NoError(t, err)
                } else if task.DesiredState > api.TaskStateRunning {
                    err := s.Update(func(tx store.Tx) error {
                        task = store.GetTask(tx, task.ID)
                        task.Status.State = task.DesiredState
                        return store.UpdateTask(tx, task)
                    })
                    assert.NoError(t, err)
                }
            }
        }
    }()
    // Create a service with four replicas specified before the orchestrator
    // is started. This should result in four tasks when the orchestrator
    // starts up.
    err := s.Update(func(tx store.Tx) error {
        s1 := &api.Service{
            ID: "id1",
            Spec: api.ServiceSpec{
                Annotations: api.Annotations{
                    Name: "name1",
                },
                Task: api.TaskSpec{
                    Runtime: &api.TaskSpec_Container{
                        Container: &api.ContainerSpec{
                            Image: "image1",
                        },
                    },
                    Restart: &api.RestartPolicy{
                        Condition: api.RestartOnNone,
                    },
                },
                Mode: &api.ServiceSpec_Replicated{
                    Replicated: &api.ReplicatedService{
                        Replicas: 4,
                    },
                },
                Update: &api.UpdateConfig{
                    FailureAction:   api.UpdateConfig_ROLLBACK,
                    Parallelism:     1,
                    Delay:           *ptypes.DurationProto(10 * time.Millisecond),
                    Monitor:         ptypes.DurationProto(500 * time.Millisecond),
                    MaxFailureRatio: 0.4,
                },
            },
        }
        assert.NoError(t, store.CreateService(tx, s1))
        return nil
    })
//......... some code omitted here .........
Example 14: DefaultCAConfig
// DefaultCAConfig returns the default CA Config, with a default expiration.
func DefaultCAConfig() api.CAConfig {
    return api.CAConfig{
        NodeCertExpiry: ptypes.DurationProto(DefaultNodeCertExpiration),
    }
}
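Because the default expiry is stored as a proto message, consumers convert it back with the package's Duration helper before doing time arithmetic. The consumer below is a hedged sketch; the function name and the fallback to DefaultNodeCertExpiration are assumptions for illustration, not swarmkit code.

// nodeCertExpiry is a hypothetical consumer of DefaultCAConfig.
func nodeCertExpiry() time.Duration {
    cfg := DefaultCAConfig()
    expiry, err := ptypes.Duration(cfg.NodeCertExpiry)
    if err != nil {
        // assumption: fall back to the package-level default on a bad proto
        return DefaultNodeCertExpiration
    }
    return expiry
}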
Example 15: TestOrchestratorRestartOnAny
func TestOrchestratorRestartOnAny(t *testing.T) {
    ctx := context.Background()
    s := store.NewMemoryStore(nil)
    assert.NotNil(t, s)
    orchestrator := NewReplicatedOrchestrator(s)
    defer orchestrator.Stop()
    watch, cancel := state.Watch(s.WatchQueue() /*state.EventCreateTask{}, state.EventUpdateTask{}*/)
    defer cancel()
    // Create a service with two instances specified before the orchestrator is
    // started. This should result in two tasks when the orchestrator
    // starts up.
    err := s.Update(func(tx store.Tx) error {
        j1 := &api.Service{
            ID: "id1",
            Spec: api.ServiceSpec{
                Annotations: api.Annotations{
                    Name: "name1",
                },
                Task: api.TaskSpec{
                    Runtime: &api.TaskSpec_Container{
                        Container: &api.ContainerSpec{},
                    },
                    Restart: &api.RestartPolicy{
                        Condition: api.RestartOnAny,
                        Delay:     ptypes.DurationProto(0),
                    },
                },
                Mode: &api.ServiceSpec_Replicated{
                    Replicated: &api.ReplicatedService{
                        Replicas: 2,
                    },
                },
            },
        }
        assert.NoError(t, store.CreateService(tx, j1))
        return nil
    })
    assert.NoError(t, err)
    // Start the orchestrator.
    go func() {
        assert.NoError(t, orchestrator.Run(ctx))
    }()
    observedTask1 := watchTaskCreate(t, watch)
    assert.Equal(t, observedTask1.Status.State, api.TaskStateNew)
    assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1")
    observedTask2 := watchTaskCreate(t, watch)
    assert.Equal(t, observedTask2.Status.State, api.TaskStateNew)
    assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1")
    // Fail the first task. Confirm that it gets restarted.
    updatedTask1 := observedTask1.Copy()
    updatedTask1.Status = api.TaskStatus{State: api.TaskStateFailed}
    err = s.Update(func(tx store.Tx) error {
        assert.NoError(t, store.UpdateTask(tx, updatedTask1))
        return nil
    })
    assert.NoError(t, err)
    expectCommit(t, watch)
    expectTaskUpdate(t, watch)
    expectCommit(t, watch)
    expectTaskUpdate(t, watch)
    observedTask3 := watchTaskCreate(t, watch)
    assert.Equal(t, observedTask3.Status.State, api.TaskStateNew)
    assert.Equal(t, observedTask3.ServiceAnnotations.Name, "name1")
    expectCommit(t, watch)
    observedTask4 := watchTaskUpdate(t, watch)
    assert.Equal(t, observedTask4.DesiredState, api.TaskStateRunning)
    assert.Equal(t, observedTask4.ServiceAnnotations.Name, "name1")
    // Mark the second task as completed. Confirm that it gets restarted.
    updatedTask2 := observedTask2.Copy()
    updatedTask2.Status = api.TaskStatus{State: api.TaskStateCompleted}
    err = s.Update(func(tx store.Tx) error {
        assert.NoError(t, store.UpdateTask(tx, updatedTask2))
        return nil
    })
    assert.NoError(t, err)
    expectCommit(t, watch)
    expectTaskUpdate(t, watch)
    expectCommit(t, watch)
    expectTaskUpdate(t, watch)
    observedTask5 := watchTaskCreate(t, watch)
    assert.Equal(t, observedTask5.Status.State, api.TaskStateNew)
    assert.Equal(t, observedTask5.ServiceAnnotations.Name, "name1")
    expectCommit(t, watch)
    observedTask6 := watchTaskUpdate(t, watch)
    assert.Equal(t, observedTask6.DesiredState, api.TaskStateRunning)
    assert.Equal(t, observedTask6.ServiceAnnotations.Name, "name1")
//......... some code omitted here .........