This article collects typical usage examples of the After function from the Golang package github.com/aws/amazon-ecs-agent/agent/utils/ttime. If you are wondering what the After function does, how to use it, or what calling it looks like in practice, the curated examples below should help.
The following shows 12 code examples of the After function, sorted by popularity by default.
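All of the examples below follow the same pattern: run the real work in a goroutine that writes its result to a buffered channel, then select on that channel against a timeout channel returned by ttime.After. The self-contained sketch below illustrates the pattern using the standard library's time.After, which ttime.After is assumed to mirror (both returning a receive-only channel of time.Time); the doSomething helper and the two-second budget are made up for illustration.

package main

import (
    "errors"
    "fmt"
    "time"
)

// doSomething stands in for a slow call such as a Docker API request.
func doSomething() (string, error) {
    time.Sleep(500 * time.Millisecond) // pretend to do work
    return "done", nil
}

// callWithTimeout runs doSomething in a goroutine and gives up after the
// given duration. The result channel is buffered so the goroutine can always
// finish its send and be garbage collected even if the caller has timed out.
func callWithTimeout(timeout time.Duration) (string, error) {
    type result struct {
        value string
        err   error
    }
    results := make(chan result, 1)
    go func() {
        value, err := doSomething()
        results <- result{value, err}
    }()
    select {
    case r := <-results:
        return r.value, r.err
    case <-time.After(timeout):
        return "", errors.New("timed out waiting for doSomething")
    }
}

func main() {
    value, err := callWithTimeout(2 * time.Second)
    fmt.Println(value, err)
}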
Example 1: cleanupTask
func (task *managedTask) cleanupTask() {
    cleanupTime := ttime.After(task.KnownStatusTime.Add(taskStoppedDuration).Sub(ttime.Now()))
    cleanupTimeBool := make(chan bool)
    go func() {
        <-cleanupTime
        cleanupTimeBool <- true
        close(cleanupTimeBool)
    }()
    for !task.waitEvent(cleanupTimeBool) {
    }
    log.Debug("Cleaning up task's containers and data", "task", task.Task)
    // First make an attempt to cleanup resources
    task.engine.sweepTask(task.Task)
    task.engine.state.RemoveTask(task.Task)
    // Now remove ourselves from the global state and cleanup channels
    task.engine.processTasks.Lock()
    delete(task.engine.managedTasks, task.Arn)
    task.engine.processTasks.Unlock()
    task.engine.saver.Save()
    // Cleanup any leftover messages before closing their channels. No new
    // messages possible because we deleted ourselves from managedTasks, so this
    // removes all stale ones
    task.discardPendingMessages()
    close(task.dockerMessages)
    close(task.acsMessages)
}
Example 2: ListContainers
// ListContainers returns a slice of container IDs.
func (dg *dockerGoClient) ListContainers(all bool) ListContainersResponse {
    timeout := ttime.After(listContainersTimeout)
    response := make(chan ListContainersResponse, 1)
    go func() { response <- dg.listContainers(all) }()
    select {
    case resp := <-response:
        return resp
    case <-timeout:
        return ListContainersResponse{Error: &DockerTimeoutError{listContainersTimeout, "listing"}}
    }
}
Example 3: RemoveContainer
func (dg *dockerGoClient) RemoveContainer(dockerId string) error {
    timeout := ttime.After(removeContainerTimeout)
    response := make(chan error, 1)
    go func() { response <- dg.removeContainer(dockerId) }()
    select {
    case resp := <-response:
        return resp
    case <-timeout:
        return &DockerTimeoutError{removeContainerTimeout, "removing"}
    }
}
Example 4: PullImage
func (dg *DockerGoClient) PullImage(image string) DockerContainerMetadata {
    timeout := ttime.After(pullImageTimeout)
    response := make(chan DockerContainerMetadata, 1)
    go func() { response <- dg.pullImage(image) }()
    select {
    case resp := <-response:
        return resp
    case <-timeout:
        return DockerContainerMetadata{Error: &DockerTimeoutError{pullImageTimeout, "pulled"}}
    }
}
Example 5: StartContainer
func (dg *dockerGoClient) StartContainer(id string) DockerContainerMetadata {
    timeout := ttime.After(startContainerTimeout)
    ctx, cancelFunc := context.WithCancel(context.TODO()) // Could pass one through from engine
    response := make(chan DockerContainerMetadata, 1)
    go func() { response <- dg.startContainer(ctx, id) }()
    select {
    case resp := <-response:
        return resp
    case <-timeout:
        cancelFunc()
        return DockerContainerMetadata{Error: &DockerTimeoutError{startContainerTimeout, "started"}}
    }
}
Example 6: CreateContainer
func (dg *dockerGoClient) CreateContainer(config *docker.Config, hostConfig *docker.HostConfig, name string) DockerContainerMetadata {
    timeout := ttime.After(createContainerTimeout)
    ctx, cancelFunc := context.WithCancel(context.TODO()) // Could pass one through from engine
    response := make(chan DockerContainerMetadata, 1)
    go func() { response <- dg.createContainer(ctx, config, hostConfig, name) }()
    select {
    case resp := <-response:
        return resp
    case <-timeout:
        cancelFunc()
        return DockerContainerMetadata{Error: &DockerTimeoutError{createContainerTimeout, "created"}}
    }
}
Example 7: StopContainer
func (dg *dockerGoClient) StopContainer(dockerId string) DockerContainerMetadata {
    timeout := ttime.After(stopContainerTimeout)
    ctx, cancelFunc := context.WithCancel(context.TODO()) // Could pass one through from engine
    // Buffered channel so in the case of timeout it takes one write, never gets
    // read, and can still be GC'd
    response := make(chan DockerContainerMetadata, 1)
    go func() { response <- dg.stopContainer(ctx, dockerId) }()
    select {
    case resp := <-response:
        return resp
    case <-timeout:
        cancelFunc()
        return DockerContainerMetadata{Error: &DockerTimeoutError{stopContainerTimeout, "stopped"}}
    }
}
Example 8: PullImage
func (dg *dockerGoClient) PullImage(image string, authData *api.RegistryAuthenticationData) DockerContainerMetadata {
    timeout := ttime.After(pullImageTimeout)
    // Workaround for devicemapper bug. See:
    // https://github.com/docker/docker/issues/9718
    pullLock.Lock()
    defer pullLock.Unlock()
    response := make(chan DockerContainerMetadata, 1)
    go func() { response <- dg.pullImage(image, authData) }()
    select {
    case resp := <-response:
        return resp
    case <-timeout:
        return DockerContainerMetadata{Error: &DockerTimeoutError{pullImageTimeout, "pulled"}}
    }
}
Example 9: InspectContainer
func (dg *dockerGoClient) InspectContainer(dockerId string) (*docker.Container, error) {
    timeout := ttime.After(inspectContainerTimeout)
    type inspectResponse struct {
        container *docker.Container
        err       error
    }
    response := make(chan inspectResponse, 1)
    go func() {
        container, err := dg.inspectContainer(dockerId)
        response <- inspectResponse{container, err}
    }()
    select {
    case resp := <-response:
        return resp.container, resp.err
    case <-timeout:
        return nil, &DockerTimeoutError{inspectContainerTimeout, "inspecting"}
    }
}
Example 10: cleanupTask
func (task *managedTask) cleanupTask(taskStoppedDuration time.Duration) {
    cleanupTimeDuration := task.KnownStatusTime.Add(taskStoppedDuration).Sub(ttime.Now())
    // There is a potential deadlock here if cleanupTime is negative. Ignore the computed
    // value in this case in favor of the default config value.
    if cleanupTimeDuration < 0 {
        log.Debug("Task Cleanup Duration is too short. Resetting to " + config.DefaultTaskCleanupWaitDuration.String())
        cleanupTimeDuration = config.DefaultTaskCleanupWaitDuration
    }
    cleanupTime := ttime.After(cleanupTimeDuration)
    cleanupTimeBool := make(chan bool)
    go func() {
        <-cleanupTime
        cleanupTimeBool <- true
        close(cleanupTimeBool)
    }()
    for !task.waitEvent(cleanupTimeBool) {
    }
    log.Debug("Cleaning up task's containers and data", "task", task.Task)
    // For the duration of this, simply discard any task events; this ensures the
    // speedy processing of other events for other tasks
    handleCleanupDone := make(chan struct{})
    go func() {
        task.engine.sweepTask(task.Task)
        task.engine.state.RemoveTask(task.Task)
        handleCleanupDone <- struct{}{}
    }()
    task.discardEventsUntil(handleCleanupDone)
    log.Debug("Finished removing task data; removing from state no longer managing", "task", task.Task)
    // Now remove ourselves from the global state and cleanup channels
    task.engine.processTasks.Lock()
    delete(task.engine.managedTasks, task.Arn)
    task.engine.processTasks.Unlock()
    task.engine.saver.Save()
    // Cleanup any leftover messages before closing their channels. No new
    // messages possible because we deleted ourselves from managedTasks, so this
    // removes all stale ones
    task.discardPendingMessages()
    close(task.dockerMessages)
    close(task.acsMessages)
}
Example 11: pullImage
func (dg *dockerGoClient) pullImage(image string, authData *api.RegistryAuthenticationData) DockerContainerMetadata {
    log.Debug("Pulling image", "image", image)
    client, err := dg.dockerClient()
    if err != nil {
        return DockerContainerMetadata{Error: CannotGetDockerClientError{version: dg.version, err: err}}
    }
    // Special case; this image is not one that should be pulled, but rather
    // should be created locally if necessary
    if image == emptyvolume.Image+":"+emptyvolume.Tag {
        err := dg.createScratchImageIfNotExists()
        if err != nil {
            return DockerContainerMetadata{Error: &api.DefaultNamedError{Name: "CreateEmptyVolumeError", Err: "Could not create empty volume " + err.Error()}}
        }
        return DockerContainerMetadata{}
    }
    authConfig, err := dg.getAuthdata(image, authData)
    if err != nil {
        return DockerContainerMetadata{Error: err}
    }
    pullDebugOut, pullWriter := io.Pipe()
    defer pullWriter.Close()
    repository, tag := parsers.ParseRepositoryTag(image)
    if tag == "" {
        repository = repository + ":" + dockerDefaultTag
    } else {
        repository = image
    }
    opts := docker.PullImageOptions{
        Repository:   repository,
        OutputStream: pullWriter,
    }
    timeout := ttime.After(dockerPullBeginTimeout)
    // pullBegan is a channel indicating that we have seen at least one line of data on the 'OutputStream' above.
    // It is here to guard against a bug wherein docker never writes anything to that channel and hangs in pulling forever.
    pullBegan := make(chan bool, 1)
    // pullBeganOnce ensures we only indicate it began once (since our channel will only be read 0 or 1 times)
    pullBeganOnce := sync.Once{}
    go func() {
        reader := bufio.NewReader(pullDebugOut)
        var line string
        var err error
        for err == nil {
            line, err = reader.ReadString('\n')
            if err != nil {
                break
            }
            pullBeganOnce.Do(func() {
                pullBegan <- true
            })
            log.Debug("Pulling image", "image", image, "status", line)
            if strings.Contains(line, "already being pulled by another client. Waiting.") {
                // This can mean the daemon is 'hung' in pulling status for this image, but we can't be sure.
                log.Error("Image 'pull' status marked as already being pulled", "image", image, "status", line)
            }
        }
        if err != nil && err != io.EOF {
            log.Warn("Error reading pull image status", "image", image, "err", err)
        }
    }()
    pullFinished := make(chan error, 1)
    go func() {
        pullFinished <- client.PullImage(opts, authConfig)
        log.Debug("Pulling image complete", "image", image)
    }()
    select {
    case <-pullBegan:
        break
    case err := <-pullFinished:
        if err != nil {
            return DockerContainerMetadata{Error: CannotXContainerError{"Pull", err.Error()}}
        }
        return DockerContainerMetadata{}
    case <-timeout:
        return DockerContainerMetadata{Error: &DockerTimeoutError{dockerPullBeginTimeout, "pullBegin"}}
    }
    log.Debug("Pull began for image", "image", image)
    defer log.Debug("Pull completed for image", "image", image)
    err = <-pullFinished
    if err != nil {
        return DockerContainerMetadata{Error: CannotXContainerError{"Pull", err.Error()}}
    }
    return DockerContainerMetadata{}
}
Example 12: overseeTask
func (task *managedTask) overseeTask() {
    llog := log.New("task", task)
    // Do a single updatestatus at the beginning to create the container
    // 'desiredstatus'es which are a construct of the engine used only here,
    // not present on the backend
    task.UpdateStatus()
    // If this was a 'state restore', send all unsent statuses
    task.emitCurrentStatus()
    if task.StartSequenceNumber != 0 && !task.DesiredStatus.Terminal() {
        llog.Debug("Waiting for any previous stops to complete", "seqnum", task.StartSequenceNumber)
        othersStopped := make(chan bool, 1)
        go func() {
            task.engine.taskStopGroup.Wait(task.StartSequenceNumber)
            othersStopped <- true
        }()
        for !task.waitEvent(othersStopped) {
            if task.DesiredStatus.Terminal() {
                // If we end up here, that means we received a start then stop for this
                // task before a task that was expected to stop before it could
                // actually stop
                break
            }
        }
        llog.Debug("Wait over; ready to move towards status: " + task.DesiredStatus.String())
    }
    for {
        // If it's steadyState, just spin until we need to do work
        for task.steadyState() {
            llog.Debug("Task at steady state", "state", task.KnownStatus.String())
            maxWait := make(chan bool, 1)
            timer := ttime.After(steadyStateTaskVerifyInterval)
            go func() {
                <-timer
                maxWait <- true
            }()
            timedOut := task.waitEvent(maxWait)
            if timedOut {
                llog.Debug("Checking task to make sure it's still at steadystate")
                go task.engine.CheckTaskState(task.Task)
            }
        }
        if !task.KnownStatus.Terminal() {
            // If we aren't terminal and we aren't steady state, we should be able to move some containers along
            llog.Debug("Task not steady state or terminal; progressing it")
            task.progressContainers()
        }
        // If we reach this point, we've changed the task in some way.
        // Conversely, for it to spin in steady state it will have to have been
        // loaded in steady state or progressed through here, so saving here should
        // be sufficient to capture state changes.
        err := task.engine.saver.Save()
        if err != nil {
            llog.Warn("Error checkpointing task's states to disk", "err", err)
        }
        if task.KnownStatus.Terminal() {
            break
        }
    }
    // We only break out of the above if this task is known to be stopped. Do
    // onetime cleanup here, including removing the task after a timeout
    llog.Debug("Task has reached stopped. We're just waiting and removing containers now")
    if task.StopSequenceNumber != 0 {
        llog.Debug("Marking done for this sequence", "seqnum", task.StopSequenceNumber)
        task.engine.taskStopGroup.Done(task.StopSequenceNumber)
    }
    task.cleanupTask(task.engine.cfg.TaskCleanupWaitDuration)
}