This page collects typical usage examples of the Golang function github.com/docker/swarmkit/protobuf/ptypes.MustTimestampProto. If you are wondering how MustTimestampProto is used in practice, or are looking for concrete Golang examples of calling it, the curated code samples below should help.
A total of 14 code examples of the MustTimestampProto function are shown below, sorted by popularity by default.
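Before the examples, here is a minimal, self-contained sketch of a call to this function. It assumes only what the examples below already show: MustTimestampProto takes a time.Time and returns a protobuf Timestamp message suitable for fields such as TaskStatus.Timestamp; following the usual Go "Must" naming convention, it is assumed to panic rather than return an error if the conversion fails.

package main

import (
    "fmt"
    "time"

    "github.com/docker/swarmkit/protobuf/ptypes"
)

func main() {
    // Convert the current wall-clock time into a protobuf Timestamp,
    // just as the swarmkit examples below do when stamping a TaskStatus.
    // The "Must" prefix is assumed to mean the function panics if the
    // time cannot be converted.
    ts := ptypes.MustTimestampProto(time.Now())
    fmt.Println(ts)
}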
Example 1: TestExpireBlacklistedCerts
func TestExpireBlacklistedCerts(t *testing.T) {
    now := time.Now()

    longAgo := now.Add(-24 * time.Hour * 1000)
    justBeforeGrace := now.Add(-expiredCertGrace - 5*time.Minute)
    justAfterGrace := now.Add(-expiredCertGrace + 5*time.Minute)
    future := now.Add(time.Hour)

    cluster := &api.Cluster{
        BlacklistedCertificates: map[string]*api.BlacklistedCertificate{
            "longAgo":         {Expiry: ptypes.MustTimestampProto(longAgo)},
            "justBeforeGrace": {Expiry: ptypes.MustTimestampProto(justBeforeGrace)},
            "justAfterGrace":  {Expiry: ptypes.MustTimestampProto(justAfterGrace)},
            "future":          {Expiry: ptypes.MustTimestampProto(future)},
        },
    }

    expireBlacklistedCerts(cluster)

    assert.Len(t, cluster.BlacklistedCertificates, 2)

    _, hasJustAfterGrace := cluster.BlacklistedCertificates["justAfterGrace"]
    assert.True(t, hasJustAfterGrace)

    _, hasFuture := cluster.BlacklistedCertificates["future"]
    assert.True(t, hasFuture)
}
Example 2: taskFitNode
// taskFitNode checks if a node has enough resources to accommodate a task.
func (s *Scheduler) taskFitNode(ctx context.Context, t *api.Task, nodeID string) *api.Task {
    nodeInfo, err := s.nodeSet.nodeInfo(nodeID)
    if err != nil {
        // node does not exist in set (it may have been deleted)
        return nil
    }
    newT := *t
    s.pipeline.SetTask(t)
    if !s.pipeline.Process(&nodeInfo) {
        // this node cannot accommodate this task
        newT.Status.Timestamp = ptypes.MustTimestampProto(time.Now())
        newT.Status.Message = s.pipeline.Explain()
        s.allTasks[t.ID] = &newT

        return &newT
    }
    newT.Status = api.TaskStatus{
        State:     api.TaskStateAssigned,
        Timestamp: ptypes.MustTimestampProto(time.Now()),
        Message:   "scheduler confirmed task can run on preassigned node",
    }
    s.allTasks[t.ID] = &newT

    if nodeInfo.addTask(&newT) {
        s.nodeSet.updateNode(nodeInfo)
    }
    return &newT
}
Example 3: completeUpdate
func (u *Updater) completeUpdate(ctx context.Context, serviceID string) {
    log.G(ctx).Debugf("update of service %s complete", serviceID)

    err := u.store.Update(func(tx store.Tx) error {
        service := store.GetService(tx, serviceID)
        if service == nil {
            return nil
        }
        if service.UpdateStatus == nil {
            // The service was changed since we started this update
            return nil
        }
        if service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED {
            service.UpdateStatus.State = api.UpdateStatus_ROLLBACK_COMPLETED
            service.UpdateStatus.Message = "rollback completed"
        } else {
            service.UpdateStatus.State = api.UpdateStatus_COMPLETED
            service.UpdateStatus.Message = "update completed"
        }
        service.UpdateStatus.CompletedAt = ptypes.MustTimestampProto(time.Now())

        return store.UpdateService(tx, service)
    })
    if err != nil {
        log.G(ctx).WithError(err).Errorf("failed to mark update of service %s complete", serviceID)
    }
}
Example 4: newLogMessage
// newLogMessage is just a helper to build a new log message.
func newLogMessage(msgctx api.LogContext, format string, vs ...interface{}) api.LogMessage {
    return api.LogMessage{
        Context:   msgctx,
        Timestamp: ptypes.MustTimestampProto(time.Now()),
        Data:      []byte(fmt.Sprintf(format, vs...)),
    }
}
Example 5: newTask
func newTask(cluster *api.Cluster, service *api.Service, slot uint64) *api.Task {
    var logDriver *api.Driver
    if service.Spec.Task.LogDriver != nil {
        // use the log driver specific to the task, if we have it.
        logDriver = service.Spec.Task.LogDriver
    } else if cluster != nil {
        // pick up the cluster default, if available.
        logDriver = cluster.Spec.TaskDefaults.LogDriver // nil is okay here.
    }

    taskID := identity.NewID()
    // We use the following scheme to assign Task names to Annotations:
    // Annotations.Name := <ServiceAnnotations.Name>.<Slot>.<TaskID>
    name := fmt.Sprintf("%v.%v.%v", service.Spec.Annotations.Name, slot, taskID)

    return &api.Task{
        ID:                 taskID,
        Annotations:        api.Annotations{Name: name},
        ServiceAnnotations: service.Spec.Annotations,
        Spec:               service.Spec.Task,
        ServiceID:          service.ID,
        Slot:               slot,
        Status: api.TaskStatus{
            State:     api.TaskStateNew,
            Timestamp: ptypes.MustTimestampProto(time.Now()),
            Message:   "created",
        },
        Endpoint: &api.Endpoint{
            Spec: service.Spec.Endpoint.Copy(),
        },
        DesiredState: api.TaskStateRunning,
        LogDriver:    logDriver,
    }
}
Example 6: newTask
func newTask(cluster *api.Cluster, service *api.Service, instance uint64) *api.Task {
    var logDriver *api.Driver
    if service.Spec.Task.LogDriver != nil {
        // use the log driver specific to the task, if we have it.
        logDriver = service.Spec.Task.LogDriver
    } else if cluster != nil {
        // pick up the cluster default, if available.
        logDriver = cluster.Spec.TaskDefaults.LogDriver // nil is okay here.
    }

    // NOTE(stevvooe): For now, we don't override the container naming and
    // labeling scheme in the agent. If we decide to do this in the future,
    // they should be overridden here.
    return &api.Task{
        ID:                 identity.NewID(),
        ServiceAnnotations: service.Spec.Annotations,
        Spec:               service.Spec.Task,
        ServiceID:          service.ID,
        Slot:               instance,
        Status: api.TaskStatus{
            State:     api.TaskStateNew,
            Timestamp: ptypes.MustTimestampProto(time.Now()),
            Message:   "created",
        },
        Endpoint: &api.Endpoint{
            Spec: service.Spec.Endpoint.Copy(),
        },
        DesiredState: api.TaskStateRunning,
        LogDriver:    logDriver,
    }
}
Example 7: AttachNetwork
// AttachNetwork allows the node to request the resources
// allocation needed for a network attachment on the specific node.
// - Returns `InvalidArgument` if the Spec is malformed.
// - Returns `NotFound` if the Network is not found.
// - Returns `PermissionDenied` if the Network is not manually attachable.
// - Returns an error if the creation fails.
func (ra *ResourceAllocator) AttachNetwork(ctx context.Context, request *api.AttachNetworkRequest) (*api.AttachNetworkResponse, error) {
    nodeInfo, err := ca.RemoteNode(ctx)
    if err != nil {
        return nil, err
    }

    var network *api.Network
    ra.store.View(func(tx store.ReadTx) {
        network = store.GetNetwork(tx, request.Config.Target)
        if network == nil {
            if networks, err := store.FindNetworks(tx, store.ByName(request.Config.Target)); err == nil && len(networks) == 1 {
                network = networks[0]
            }
        }
    })

    if network == nil {
        return nil, grpc.Errorf(codes.NotFound, "network %s not found", request.Config.Target)
    }

    if !network.Spec.Attachable {
        return nil, grpc.Errorf(codes.PermissionDenied, "network %s not manually attachable", request.Config.Target)
    }

    t := &api.Task{
        ID:     identity.NewID(),
        NodeID: nodeInfo.NodeID,
        Spec: api.TaskSpec{
            Runtime: &api.TaskSpec_Attachment{
                Attachment: &api.NetworkAttachmentSpec{
                    ContainerID: request.ContainerID,
                },
            },
            Networks: []*api.NetworkAttachmentConfig{
                {
                    Target:    network.ID,
                    Addresses: request.Config.Addresses,
                },
            },
        },
        Status: api.TaskStatus{
            State:     api.TaskStateNew,
            Timestamp: ptypes.MustTimestampProto(time.Now()),
            Message:   "created",
        },
        DesiredState: api.TaskStateRunning,
        // TODO: Add Network attachment.
    }

    if err := ra.store.Update(func(tx store.Tx) error {
        return store.CreateTask(tx, t)
    }); err != nil {
        return nil, err
    }

    return &api.AttachNetworkResponse{AttachmentID: t.ID}, nil
}
Example 8: noSuitableNode
func (s *Scheduler) noSuitableNode(ctx context.Context, taskGroup map[string]*api.Task, schedulingDecisions map[string]schedulingDecision) {
    explanation := s.pipeline.Explain()
    for _, t := range taskGroup {
        log.G(ctx).WithField("task.id", t.ID).Debug("no suitable node available for task")

        newT := *t
        newT.Status.Timestamp = ptypes.MustTimestampProto(time.Now())
        if explanation != "" {
            newT.Status.Message = "no suitable node (" + explanation + ")"
        } else {
            newT.Status.Message = "no suitable node"
        }
        s.allTasks[t.ID] = &newT
        schedulingDecisions[t.ID] = schedulingDecision{old: t, new: &newT}

        s.enqueue(&newT)
    }
}
Example 9: newTask
func newTask(service *api.Service, instance uint64) *api.Task {
    // NOTE(stevvooe): For now, we don't override the container naming and
    // labeling scheme in the agent. If we decide to do this in the future,
    // they should be overridden here.
    return &api.Task{
        ID:                 identity.NewID(),
        ServiceAnnotations: service.Spec.Annotations,
        Spec:               service.Spec.Task,
        ServiceID:          service.ID,
        Slot:               instance,
        Status: api.TaskStatus{
            State:     api.TaskStateNew,
            Timestamp: ptypes.MustTimestampProto(time.Now()),
            Message:   "created",
        },
        DesiredState: api.TaskStateRunning,
    }
}
Example 10: newTask
func newTask(cluster *api.Cluster, service *api.Service, slot uint64, nodeID string) *api.Task {
    var logDriver *api.Driver
    if service.Spec.Task.LogDriver != nil {
        // use the log driver specific to the task, if we have it.
        logDriver = service.Spec.Task.LogDriver
    } else if cluster != nil {
        // pick up the cluster default, if available.
        logDriver = cluster.Spec.TaskDefaults.LogDriver // nil is okay here.
    }

    taskID := identity.NewID()
    task := api.Task{
        ID:                 taskID,
        ServiceAnnotations: service.Spec.Annotations,
        Spec:               service.Spec.Task,
        ServiceID:          service.ID,
        Slot:               slot,
        Status: api.TaskStatus{
            State:     api.TaskStateNew,
            Timestamp: ptypes.MustTimestampProto(time.Now()),
            Message:   "created",
        },
        Endpoint: &api.Endpoint{
            Spec: service.Spec.Endpoint.Copy(),
        },
        DesiredState: api.TaskStateRunning,
        LogDriver:    logDriver,
    }

    // In global mode we also set the NodeID
    if nodeID != "" {
        task.NodeID = nodeID
    }

    // Assign name based on task name schema
    name := store.TaskName(&task)
    task.Annotations = api.Annotations{Name: name}

    return &task
}
Example 11: startUpdate
func (u *Updater) startUpdate(ctx context.Context, serviceID string) {
    err := u.store.Update(func(tx store.Tx) error {
        service := store.GetService(tx, serviceID)
        if service == nil {
            return nil
        }
        if service.UpdateStatus != nil {
            return nil
        }

        service.UpdateStatus = &api.UpdateStatus{
            State:     api.UpdateStatus_UPDATING,
            Message:   "update in progress",
            StartedAt: ptypes.MustTimestampProto(time.Now()),
        }

        return store.UpdateService(tx, service)
    })
    if err != nil {
        log.G(ctx).WithError(err).Errorf("failed to mark update of service %s in progress", serviceID)
    }
}
Example 12: updateTaskStatus
// updateTaskStatus sets TaskStatus and updates timestamp.
func updateTaskStatus(t *api.Task, newStatus api.TaskState, message string) {
    t.Status.State = newStatus
    t.Status.Message = message
    t.Status.Timestamp = ptypes.MustTimestampProto(time.Now())
}
Example 13: Do
//......... part of the code omitted here .........
            exitCode = ec.ExitCode()
        }

        if cause := errors.Cause(err); cause == context.DeadlineExceeded || cause == context.Canceled {
            return retry()
        }

        status.Err = err.Error() // still reported on temporary
        if IsTemporary(err) {
            return retry()
        }

        // only at this point do we consider the error fatal to the task.
        log.G(ctx).WithError(err).Error("fatal task error")

        // NOTE(stevvooe): The following switch dictates the terminal failure
        // state based on the state in which the failure was encountered.
        switch {
        case status.State < api.TaskStateStarting:
            status.State = api.TaskStateRejected
        case status.State >= api.TaskStateStarting:
            status.State = api.TaskStateFailed
        }

        return status, nil
    }

    // below, we have several callbacks that are run after the state transition
    // is completed.
    defer func() {
        logStateChange(ctx, task.DesiredState, task.Status.State, status.State)

        if !equality.TaskStatusesEqualStable(status, &task.Status) {
            status.Timestamp = ptypes.MustTimestampProto(time.Now())
        }
    }()

    // extract the container status from the container, if supported.
    defer func() {
        // only do this if in an active state
        if status.State < api.TaskStateStarting {
            return
        }

        if containerStatus == nil {
            // collect this, if we haven't
            cctlr, ok := ctlr.(ContainerStatuser)
            if !ok {
                return
            }

            var err error
            containerStatus, err = cctlr.ContainerStatus(ctx)
            if err != nil && !contextDoneError(err) {
                log.G(ctx).WithError(err).Error("container status unavailable")
            }

            // at this point, things have gone fairly wrong. Remain positive
            // and let's get something out the door.
            if containerStatus == nil {
                containerStatus = new(api.ContainerStatus)
                containerStatusTask := task.Status.GetContainer()
                if containerStatusTask != nil {
                    *containerStatus = *containerStatusTask // copy it over.
                }
            }
Example 14: scheduleTaskGroup
// scheduleTaskGroup schedules a batch of tasks that are part of the same
// service and share the same version of the spec.
func (s *Scheduler) scheduleTaskGroup(ctx context.Context, taskGroup map[string]*api.Task, schedulingDecisions map[string]schedulingDecision) {
    // Pick a task at random from taskGroup to use for constraint
    // evaluation. It doesn't matter which one we pick because all the
    // tasks in the group are equal in terms of the fields the constraint
    // filters consider.
    var t *api.Task
    for _, t = range taskGroup {
        break
    }

    s.pipeline.SetTask(t)

    now := time.Now()
    nodeLess := func(a *NodeInfo, b *NodeInfo) bool {
        // If either node has at least maxFailures recent failures,
        // that's the deciding factor.
        recentFailuresA := a.countRecentFailures(now, t.ServiceID)
        recentFailuresB := b.countRecentFailures(now, t.ServiceID)

        if recentFailuresA >= maxFailures || recentFailuresB >= maxFailures {
            if recentFailuresA > recentFailuresB {
                return false
            }
            if recentFailuresB > recentFailuresA {
                return true
            }
        }

        tasksByServiceA := a.DesiredRunningTasksCountByService[t.ServiceID]
        tasksByServiceB := b.DesiredRunningTasksCountByService[t.ServiceID]

        if tasksByServiceA < tasksByServiceB {
            return true
        }
        if tasksByServiceA > tasksByServiceB {
            return false
        }

        // Total number of tasks breaks ties.
        return a.DesiredRunningTasksCount < b.DesiredRunningTasksCount
    }

    nodes := s.nodeSet.findBestNodes(len(taskGroup), s.pipeline.Process, nodeLess)
    nodeCount := len(nodes)
    if nodeCount == 0 {
        s.noSuitableNode(ctx, taskGroup, schedulingDecisions)
        return
    }

    failedConstraints := make(map[int]bool) // key is index in nodes slice
    nodeIter := 0
    for taskID, t := range taskGroup {
        n := &nodes[nodeIter%nodeCount]
        log.G(ctx).WithField("task.id", t.ID).Debugf("assigning to node %s", n.ID)
        newT := *t
        newT.NodeID = n.ID
        newT.Status = api.TaskStatus{
            State:     api.TaskStateAssigned,
            Timestamp: ptypes.MustTimestampProto(time.Now()),
            Message:   "scheduler assigned task to node",
        }
        s.allTasks[t.ID] = &newT

        nodeInfo, err := s.nodeSet.nodeInfo(n.ID)
        if err == nil && nodeInfo.addTask(&newT) {
            s.nodeSet.updateNode(nodeInfo)
            nodes[nodeIter%nodeCount] = nodeInfo
        }

        schedulingDecisions[taskID] = schedulingDecision{old: t, new: &newT}
        delete(taskGroup, taskID)

        if nodeIter+1 < nodeCount {
            // First pass fills the nodes until they have the same
            // number of tasks from this service.
            nextNode := nodes[(nodeIter+1)%nodeCount]
            if nodeLess(&nextNode, &nodeInfo) {
                nodeIter++
            }
        } else {
            // In later passes, we just assign one task at a time
            // to each node that still meets the constraints.
            nodeIter++
        }

        origNodeIter := nodeIter
        for failedConstraints[nodeIter%nodeCount] || !s.pipeline.Process(&nodes[nodeIter%nodeCount]) {
            failedConstraints[nodeIter%nodeCount] = true
            nodeIter++
            if nodeIter-origNodeIter == nodeCount {
                // None of the nodes meet the constraints anymore.
                s.noSuitableNode(ctx, taskGroup, schedulingDecisions)
                return
            }
        }
    }
//......... part of the code omitted here .........