本文整理匯總了Golang中github.com/docker/swarmkit/manager/state/store.CreateTask函數的典型用法代碼示例。如果您正苦於以下問題:Golang CreateTask函數的具體用法?Golang CreateTask怎麽用?Golang CreateTask使用的例子?那麽, 這裏精選的函數代碼示例或許可以為您提供幫助。
在下文中一共展示了CreateTask函數的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Golang代碼示例。
示例1: TestOldTasksBatch
// TestOldTasksBatch verifies that the dispatcher's Tasks stream reflects
// tasks created and then deleted in separate store transactions: after both
// tasks are removed, the stream must report an empty task list.
func TestOldTasksBatch(t *testing.T) {
gd, err := startDispatcher(DefaultConfig())
assert.NoError(t, err)
defer gd.Close()
var expectedSessionID string
var nodeID string
{
// Open a session to obtain a valid session ID and the node ID that
// the test tasks will be attached to.
stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{})
assert.NoError(t, err)
defer stream.CloseSend()
resp, err := stream.Recv()
assert.NoError(t, err)
assert.NotEmpty(t, resp.SessionID)
expectedSessionID = resp.SessionID
nodeID = resp.Node.ID
}
testTask1 := &api.Task{
NodeID: nodeID,
ID: "testTask1",
Status: api.TaskStatus{State: api.TaskStateAssigned},
}
testTask2 := &api.Task{
NodeID: nodeID,
ID: "testTask2",
Status: api.TaskStatus{State: api.TaskStateAssigned},
}
stream, err := gd.Clients[0].Tasks(context.Background(), &api.TasksRequest{SessionID: expectedSessionID})
assert.NoError(t, err)
resp, err := stream.Recv()
assert.NoError(t, err)
// initially no tasks
assert.Equal(t, 0, len(resp.Tasks))
err = gd.Store.Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateTask(tx, testTask1))
assert.NoError(t, store.CreateTask(tx, testTask2))
return nil
})
assert.NoError(t, err)
err = gd.Store.Update(func(tx store.Tx) error {
assert.NoError(t, store.DeleteTask(tx, testTask1.ID))
assert.NoError(t, store.DeleteTask(tx, testTask2.ID))
return nil
})
assert.NoError(t, err)
resp, err = stream.Recv()
assert.NoError(t, err)
// All tasks have been deleted. Expected value goes first, matching
// testify's (t, expected, actual) convention used above.
assert.Equal(t, 0, len(resp.Tasks))
}
示例2: TestLogBrokerNoFollowUnscheduledTask
// TestLogBrokerNoFollowUnscheduledTask checks that subscribing in no-follow
// mode to the logs of a task that was never scheduled surfaces an error to
// the subscriber.
func TestLogBrokerNoFollowUnscheduledTask(t *testing.T) {
ctx, ca, _, serverAddr, _, done := testLogBrokerEnv(t)
defer done()
logClient, closeClient := testLogClient(t, serverAddr)
defer closeClient()
// Seed the store with a task that has no node assignment.
require.NoError(t, ca.MemoryStore.Update(func(tx store.Tx) error {
unscheduled := &api.Task{
ID: "task1",
ServiceID: "service",
}
return store.CreateTask(tx, unscheduled)
}))
// Request the service's logs without following.
subscription, err := logClient.SubscribeLogs(ctx, &api.SubscribeLogsRequest{
Options: &api.LogSubscriptionOptions{
Follow: false,
},
Selector: &api.LogSelector{
ServiceIDs: []string{"service"},
},
})
require.NoError(t, err)
// The broker must fail fast because the task was never scheduled.
_, err = subscription.Recv()
require.Error(t, err)
require.Contains(t, err.Error(), "task1 has not been scheduled")
}
示例3: TestTaskUpdateNoCert
// TestTaskUpdateNoCert ensures that UpdateTaskStatus is rejected with
// PermissionDenied when the caller presents no client certificate.
func TestTaskUpdateNoCert(t *testing.T) {
gd, err := startDispatcher(DefaultConfig())
assert.NoError(t, err)
defer gd.Close()
task := &api.Task{
ID: "testTask1",
}
err = gd.Store.Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateTask(tx, task))
return nil
})
assert.NoError(t, err)
task.Status = api.TaskStatus{State: api.TaskStateAssigned}
req := &api.UpdateTaskStatusRequest{
Updates: []*api.UpdateTaskStatusRequest_TaskStatusUpdate{
{
TaskID: task.ID,
Status: &task.Status,
},
},
}
// Clients[2] carries no client certificate, so the update must be denied.
resp, err := gd.Clients[2].UpdateTaskStatus(context.Background(), req)
assert.Nil(t, resp)
assert.Error(t, err)
assert.EqualError(t, err, "rpc error: code = 7 desc = Permission denied: unauthorized peer role: rpc error: code = 7 desc = no client certificates in request")
}
示例4: TestSchedulerNoReadyNodes
// TestSchedulerNoReadyNodes verifies scheduler behavior when no READY nodes
// exist: the pending task first fails assignment with "no suitable node",
// and is assigned as soon as a ready node appears in the store.
func TestSchedulerNoReadyNodes(t *testing.T) {
ctx := context.Background()
// A pending task that wants to run but has nowhere to go yet.
initialTask := &api.Task{
ID: "id1",
DesiredState: api.TaskStateRunning,
ServiceAnnotations: api.Annotations{
Name: "name1",
},
Status: api.TaskStatus{
State: api.TaskStatePending,
},
}
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
defer s.Close()
err := s.Update(func(tx store.Tx) error {
// Add initial task
assert.NoError(t, store.CreateTask(tx, initialTask))
return nil
})
assert.NoError(t, err)
scheduler := New(s)
// Watch task updates so both the failure and the later assignment
// can be observed.
watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
defer cancel()
go func() {
assert.NoError(t, scheduler.Run(ctx))
}()
defer scheduler.Stop()
// With no nodes registered, the scheduler must report a failure first.
failure := watchAssignmentFailure(t, watch)
assert.Equal(t, "no suitable node", failure.Status.Message)
err = s.Update(func(tx store.Tx) error {
// Create a ready node. The task should get assigned to this
// node.
node := &api.Node{
ID: "newnode",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "newnode",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
}
assert.NoError(t, store.CreateNode(tx, node))
return nil
})
assert.NoError(t, err)
assignment := watchAssignment(t, watch)
assert.Equal(t, "newnode", assignment.NodeID)
}
示例5: updateTask
// updateTask replaces the tasks in the given slot with the already-built
// updated task: it creates the new task and shuts down the old ones in a
// single store batch, optionally delays the start of the new task until an
// old RUNNING task stops, then blocks until the new task reaches RUNNING
// or the updater is stopped.
func (u *Updater) updateTask(ctx context.Context, slot slot, updated *api.Task) error {
// Kick off the watch before even creating the updated task. This is in order to avoid missing any event.
taskUpdates, cancel := state.Watch(u.watchQueue, state.EventUpdateTask{
Task: &api.Task{ID: updated.ID},
Checks: []state.TaskCheckFunc{state.TaskCheckID},
})
defer cancel()
var delayStartCh <-chan struct{}
// Atomically create the updated task and bring down the old one.
_, err := u.store.Batch(func(batch *store.Batch) error {
err := batch.Update(func(tx store.Tx) error {
if err := store.CreateTask(tx, updated); err != nil {
return err
}
return nil
})
if err != nil {
return err
}
u.removeOldTasks(ctx, batch, slot)
// Only the first task in the slot that is still desired-RUNNING
// triggers a delayed start of the replacement.
for _, t := range slot {
if t.DesiredState == api.TaskStateRunning {
// Wait for the old task to stop or time out, and then set the new one
// to RUNNING.
delayStartCh = u.restarts.DelayStart(ctx, nil, t, updated.ID, 0, true)
break
}
}
return nil
})
if err != nil {
return err
}
// Block until the delayed start fires, if one was scheduled.
if delayStartCh != nil {
<-delayStartCh
}
// Wait for the new task to come up.
// TODO(aluzzardi): Consider adding a timeout here.
for {
select {
case e := <-taskUpdates:
updated = e.(state.EventUpdateTask).Task
if updated.Status.State >= api.TaskStateRunning {
return nil
}
case <-u.stopChan:
// Updater is shutting down; stop waiting.
return nil
}
}
}
示例6: AttachNetwork
// AttachNetwork allows the node to request the resources
// allocation needed for a network attachment on the specific node.
// - Returns `InvalidArgument` if the Spec is malformed.
// - Returns `NotFound` if the Network is not found.
// - Returns `PermissionDenied` if the Network is not manually attachable.
// - Returns an error if the creation fails.
func (ra *ResourceAllocator) AttachNetwork(ctx context.Context, request *api.AttachNetworkRequest) (*api.AttachNetworkResponse, error) {
nodeInfo, err := ca.RemoteNode(ctx)
if err != nil {
return nil, err
}
// Resolve the target network: first treat Target as an ID, then fall
// back to a name lookup, accepting only an unambiguous single match.
var network *api.Network
ra.store.View(func(tx store.ReadTx) {
network = store.GetNetwork(tx, request.Config.Target)
if network == nil {
if networks, err := store.FindNetworks(tx, store.ByName(request.Config.Target)); err == nil && len(networks) == 1 {
network = networks[0]
}
}
})
if network == nil {
return nil, grpc.Errorf(codes.NotFound, "network %s not found", request.Config.Target)
}
if !network.Spec.Attachable {
return nil, grpc.Errorf(codes.PermissionDenied, "network %s not manually attachable", request.Config.Target)
}
// Build an attachment task pinned to the requesting node.
t := &api.Task{
ID: identity.NewID(),
NodeID: nodeInfo.NodeID,
Spec: api.TaskSpec{
Runtime: &api.TaskSpec_Attachment{
Attachment: &api.NetworkAttachmentSpec{
ContainerID: request.ContainerID,
},
},
Networks: []*api.NetworkAttachmentConfig{
{
Target: network.ID,
Addresses: request.Config.Addresses,
},
},
},
Status: api.TaskStatus{
State: api.TaskStateNew,
Timestamp: ptypes.MustTimestampProto(time.Now()),
Message: "created",
},
DesiredState: api.TaskStateRunning,
// TODO: Add Network attachment.
}
if err := ra.store.Update(func(tx store.Tx) error {
return store.CreateTask(tx, t)
}); err != nil {
return nil, err
}
return &api.AttachNetworkResponse{AttachmentID: t.ID}, nil
}
示例7: addTask
// addTask creates one task for the given service on the given node inside
// the supplied batch, logging (rather than returning) any creation failure.
func (g *Orchestrator) addTask(ctx context.Context, batch *store.Batch, service *api.Service, nodeID string) {
created := orchestrator.NewTask(g.cluster, service, 0, nodeID)
if err := batch.Update(func(tx store.Tx) error {
return store.CreateTask(tx, created)
}); err != nil {
log.G(ctx).WithError(err).Errorf("global orchestrator: failed to create task")
}
}
示例8: updateTask
// updateTask replaces original with updated: in a single transaction it
// marks the original task for shutdown and creates the updated task, then
// waits for the delayed start to fire and blocks until the new task
// reaches RUNNING or the updater is stopped.
func (u *Updater) updateTask(ctx context.Context, original, updated *api.Task) error {
log.G(ctx).Debugf("replacing %s with %s", original.ID, updated.ID)
// Kick off the watch before even creating the updated task. This is in order to avoid missing any event.
taskUpdates, cancel := state.Watch(u.watchQueue, state.EventUpdateTask{
Task: &api.Task{ID: updated.ID},
Checks: []state.TaskCheckFunc{state.TaskCheckID},
})
defer cancel()
var delayStartCh <-chan struct{}
// Atomically create the updated task and bring down the old one.
err := u.store.Update(func(tx store.Tx) error {
// Re-read the task inside the transaction so concurrent changes
// are not clobbered.
t := store.GetTask(tx, original.ID)
if t == nil {
return fmt.Errorf("task %s not found while trying to update it", original.ID)
}
if t.DesiredState > api.TaskStateRunning {
return fmt.Errorf("task %s was already shut down when reached by updater", original.ID)
}
t.DesiredState = api.TaskStateShutdown
if err := store.UpdateTask(tx, t); err != nil {
return err
}
if err := store.CreateTask(tx, updated); err != nil {
return err
}
// Wait for the old task to stop or time out, and then set the new one
// to RUNNING.
delayStartCh = u.restarts.DelayStart(ctx, tx, original, updated.ID, 0, true)
return nil
})
if err != nil {
return err
}
<-delayStartCh
// Wait for the new task to come up.
// TODO(aluzzardi): Consider adding a timeout here.
for {
select {
case e := <-taskUpdates:
updated = e.(state.EventUpdateTask).Task
if updated.Status.State >= api.TaskStateRunning {
return nil
}
case <-u.stopChan:
// Updater is shutting down; stop waiting.
return nil
}
}
}
示例9: createTask
// createTask inserts a fresh task with the given desired state into the
// test server's store and returns it.
func createTask(t *testing.T, ts *testServer, desiredState api.TaskState) *api.Task {
created := &api.Task{
ID: identity.NewID(),
DesiredState: desiredState,
}
assert.NoError(t, ts.Store.Update(func(tx store.Tx) error {
return store.CreateTask(tx, created)
}))
return created
}
示例10: addTasks
// addTasks creates count new tasks for the service, assigning each one to
// the lowest slot number that currently has no running task.
func (r *ReplicatedOrchestrator) addTasks(ctx context.Context, batch *store.Batch, service *api.Service, runningSlots map[uint64]slot, count int) {
next := uint64(0)
for created := 0; created < count; created++ {
// Advance to the next slot number without a running task.
for {
next++
if _, occupied := runningSlots[next]; !occupied {
break
}
}
if err := batch.Update(func(tx store.Tx) error {
return store.CreateTask(tx, newTask(r.cluster, service, next))
}); err != nil {
log.G(ctx).Errorf("Failed to create task: %v", err)
}
}
}
示例11: addTasks
// addTasks creates count new tasks for the service, assigning each one to
// the lowest instance number that currently has no running task.
func (r *ReplicatedOrchestrator) addTasks(ctx context.Context, batch *store.Batch, service *api.Service, runningInstances map[uint64]struct{}, count int) {
var candidate uint64
for created := 0; created < count; created++ {
// Advance to the next instance number without a running task.
for {
candidate++
if _, taken := runningInstances[candidate]; !taken {
break
}
}
if err := batch.Update(func(tx store.Tx) error {
return store.CreateTask(tx, newTask(service, candidate))
}); err != nil {
log.G(ctx).Errorf("Failed to create task: %v", err)
}
}
}
示例12: TestLogBrokerNoFollow
func TestLogBrokerNoFollow(t *testing.T) {
ctx, ca, _, serverAddr, brokerAddr, done := testLogBrokerEnv(t)
defer done()
client, clientDone := testLogClient(t, serverAddr)
defer clientDone()
agent1, agent1Security, agent1Done := testBrokerClient(t, ca, brokerAddr)
defer agent1Done()
agent1subscriptions := listenSubscriptions(ctx, t, agent1)
agent2, agent2Security, agent2Done := testBrokerClient(t, ca, brokerAddr)
defer agent2Done()
agent2subscriptions := listenSubscriptions(ctx, t, agent2)
// Create fake environment.
require.NoError(t, ca.MemoryStore.Update(func(tx store.Tx) error {
if err := store.CreateTask(tx, &api.Task{
ID: "task1",
ServiceID: "service",
NodeID: agent1Security.ServerTLSCreds.NodeID(),
}); err != nil {
return err
}
if err := store.CreateTask(tx, &api.Task{
ID: "task2",
ServiceID: "service",
NodeID: agent2Security.ServerTLSCreds.NodeID(),
}); err != nil {
return err
}
return nil
}))
// We need to sleep here to give ListenSubscriptions time to call
// registerSubscription before SubscribeLogs concludes that one or both
// of the agents are not connected, and prematurely calls Done for one
// or both nodes. Think of these stream RPC calls as goroutines which
// don't have synchronization around anything that happens in the RPC
// handler before a send or receive. It would be nice if we had a way
// of confirming that a node was listening for subscriptions before
// calling SubscribeLogs, but the current API doesn't provide this.
time.Sleep(time.Second)
// Subscribe to logs in no follow mode
logs, err := client.SubscribeLogs(ctx, &api.SubscribeLogsRequest{
Options: &api.LogSubscriptionOptions{
Follow: false,
},
Selector: &api.LogSelector{
ServiceIDs: []string{"service"},
},
})
require.NoError(t, err)
// Get the subscriptions from the agents.
subscription1 := ensureSubscription(t, agent1subscriptions)
require.Equal(t, subscription1.Selector.ServiceIDs[0], "service")
subscription2 := ensureSubscription(t, agent2subscriptions)
require.Equal(t, subscription2.Selector.ServiceIDs[0], "service")
require.Equal(t, subscription1.ID, subscription2.ID)
// Publish a log message from agent-1 and close the publisher
publisher, err := agent1.PublishLogs(ctx)
require.NoError(t, err)
require.NoError(t,
publisher.Send(&api.PublishLogsMessage{
SubscriptionID: subscription1.ID,
Messages: []api.LogMessage{
newLogMessage(api.LogContext{
NodeID: agent1Security.ServerTLSCreds.NodeID(),
ServiceID: "service",
TaskID: "task1",
}, "log message"),
},
}))
_, err = publisher.CloseAndRecv()
require.NoError(t, err)
// Ensure we get it from the other end
log, err := logs.Recv()
require.NoError(t, err)
require.Len(t, log.Messages, 1)
require.Equal(t, log.Messages[0].Context.NodeID, agent1Security.ServerTLSCreds.NodeID())
// Now publish a message from the other agent and close the subscription
publisher, err = agent2.PublishLogs(ctx)
require.NoError(t, err)
require.NoError(t,
publisher.Send(&api.PublishLogsMessage{
SubscriptionID: subscription2.ID,
Messages: []api.LogMessage{
newLogMessage(api.LogContext{
NodeID: agent2Security.ServerTLSCreds.NodeID(),
ServiceID: "service",
TaskID: "task2",
}, "log message"),
//.........這裏部分代碼省略.........
示例13: TestUpdaterStopGracePeriod
// TestUpdaterStopGracePeriod verifies that a rolling update honors the
// container StopGracePeriod: updating the service image must take at least
// as long as the configured grace period (100ms here).
func TestUpdaterStopGracePeriod(t *testing.T) {
ctx := context.Background()
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
// Move tasks to their desired state.
watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
defer cancel()
go func() {
for {
select {
case e := <-watch:
task := e.(state.EventUpdateTask).Task
err := s.Update(func(tx store.Tx) error {
// Re-read inside the transaction to avoid clobbering
// concurrent updates.
task = store.GetTask(tx, task.ID)
// Explicitly do not set task state to
// DEAD to trigger StopGracePeriod
if task.DesiredState == api.TaskStateRunning && task.Status.State != api.TaskStateRunning {
task.Status.State = api.TaskStateRunning
return store.UpdateTask(tx, task)
}
return nil
})
assert.NoError(t, err)
}
}
}()
var instances uint64 = 3
// Replicated service with a 100ms stop grace period on its container.
service := &api.Service{
ID: "id1",
Spec: api.ServiceSpec{
Annotations: api.Annotations{
Name: "name1",
},
Task: api.TaskSpec{
Runtime: &api.TaskSpec_Container{
Container: &api.ContainerSpec{
Image: "v:1",
StopGracePeriod: ptypes.DurationProto(100 * time.Millisecond),
},
},
},
Mode: &api.ServiceSpec_Replicated{
Replicated: &api.ReplicatedService{
Replicas: instances,
},
},
},
}
err := s.Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateService(tx, service))
for i := uint64(0); i < instances; i++ {
task := newTask(nil, service, uint64(i))
task.Status.State = api.TaskStateRunning
assert.NoError(t, store.CreateTask(tx, task))
}
return nil
})
assert.NoError(t, err)
originalTasks := getRunnableServiceTasks(t, s, service)
for _, task := range originalTasks {
assert.Equal(t, "v:1", task.Spec.GetContainer().Image)
}
// Trigger a rolling update to a new image and time it.
before := time.Now()
service.Spec.Task.GetContainer().Image = "v:2"
updater := NewUpdater(s, NewRestartSupervisor(s))
// Override the default (1 minute) to speed up the test.
updater.restarts.taskTimeout = 100 * time.Millisecond
updater.Run(ctx, nil, service, getRunnableServiceTasks(t, s, service))
updatedTasks := getRunnableServiceTasks(t, s, service)
for _, task := range updatedTasks {
assert.Equal(t, "v:2", task.Spec.GetContainer().Image)
}
after := time.Now()
// At least 100 ms should have elapsed. Only check the lower bound,
// because the system may be slow and it could have taken longer.
if after.Sub(before) < 100*time.Millisecond {
t.Fatal("stop timeout should have elapsed")
}
}
示例14: TestSchedulerResourceConstraint
func TestSchedulerResourceConstraint(t *testing.T) {
ctx := context.Background()
// Create a ready node without enough memory to run the task.
underprovisionedNode := &api.Node{
ID: "underprovisioned",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "underprovisioned",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
Description: &api.NodeDescription{
Resources: &api.Resources{
NanoCPUs: 1e9,
MemoryBytes: 1e9,
},
},
}
// Non-ready nodes that satisfy the constraints but shouldn't be used
nonready1 := &api.Node{
ID: "nonready1",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "nonready1",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_UNKNOWN,
},
Description: &api.NodeDescription{
Resources: &api.Resources{
NanoCPUs: 2e9,
MemoryBytes: 2e9,
},
},
}
nonready2 := &api.Node{
ID: "nonready2",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "nonready2",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_UNKNOWN,
},
Description: &api.NodeDescription{
Resources: &api.Resources{
NanoCPUs: 2e9,
MemoryBytes: 2e9,
},
},
}
initialTask := &api.Task{
ID: "id1",
DesiredState: api.TaskStateRunning,
Spec: api.TaskSpec{
Runtime: &api.TaskSpec_Container{
Container: &api.ContainerSpec{},
},
Resources: &api.ResourceRequirements{
Reservations: &api.Resources{
MemoryBytes: 2e9,
},
},
},
ServiceAnnotations: api.Annotations{
Name: "name1",
},
Status: api.TaskStatus{
State: api.TaskStatePending,
},
}
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
defer s.Close()
err := s.Update(func(tx store.Tx) error {
// Add initial node and task
assert.NoError(t, store.CreateTask(tx, initialTask))
assert.NoError(t, store.CreateNode(tx, underprovisionedNode))
assert.NoError(t, store.CreateNode(tx, nonready1))
assert.NoError(t, store.CreateNode(tx, nonready2))
return nil
})
assert.NoError(t, err)
scheduler := New(s)
watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
defer cancel()
go func() {
assert.NoError(t, scheduler.Run(ctx))
}()
//.........這裏部分代碼省略.........
示例15: TestSchedulerFaultyNode
// TestSchedulerFaultyNode verifies that repeated task failures on a node
// eventually make the scheduler avoid it: the first five replacement tasks
// go to the empty node id2, but after enough failures there, subsequent
// tasks are placed back on id1.
func TestSchedulerFaultyNode(t *testing.T) {
ctx := context.Background()
// Template copied for each replacement task in the failure cycle below.
taskTemplate := &api.Task{
ServiceID: "service1",
DesiredState: api.TaskStateRunning,
ServiceAnnotations: api.Annotations{
Name: "name1",
},
Status: api.TaskStatus{
State: api.TaskStatePending,
},
}
node1 := &api.Node{
ID: "id1",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "id1",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
}
node2 := &api.Node{
ID: "id2",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "id2",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
}
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
defer s.Close()
err := s.Update(func(tx store.Tx) error {
// Add initial nodes, and one task assigned to node id1
assert.NoError(t, store.CreateNode(tx, node1))
assert.NoError(t, store.CreateNode(tx, node2))
task1 := taskTemplate.Copy()
task1.ID = "id1"
task1.NodeID = "id1"
task1.Status.State = api.TaskStateRunning
assert.NoError(t, store.CreateTask(tx, task1))
return nil
})
assert.NoError(t, err)
scheduler := New(s)
watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
defer cancel()
go func() {
assert.NoError(t, scheduler.Run(ctx))
}()
defer scheduler.Stop()
for i := 0; i != 8; i++ {
// Simulate a task failure cycle
newTask := taskTemplate.Copy()
newTask.ID = identity.NewID()
err = s.Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateTask(tx, newTask))
return nil
})
assert.NoError(t, err)
assignment := watchAssignment(t, watch)
assert.Equal(t, newTask.ID, assignment.ID)
if i < 5 {
// The first 5 attempts should be assigned to node id2 because
// it has no replicas of the service.
assert.Equal(t, "id2", assignment.NodeID)
} else {
// The next ones should be assigned to id1, since we'll
// flag id2 as potentially faulty.
assert.Equal(t, "id1", assignment.NodeID)
}
// Mark the task failed so the scheduler records the failure
// against the node it ran on.
err = s.Update(func(tx store.Tx) error {
newTask := store.GetTask(tx, newTask.ID)
require.NotNil(t, newTask)
newTask.Status.State = api.TaskStateFailed
assert.NoError(t, store.UpdateTask(tx, newTask))
return nil
})
assert.NoError(t, err)
}
}