This article collects typical usage examples of the Golang Command.ContainerPid method from github.com/docker/docker/daemon/execdriver. If you are wondering what Command.ContainerPid does, how to use it, or want concrete examples, the curated method examples here should help. You can also explore further usage examples of the containing type, github.com/docker/docker/daemon/execdriver.Command.
Seven code examples of the Command.ContainerPid method are shown below, ordered by popularity by default.
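Before the examples, a note on what the field is for: ContainerPid is where each execdriver records the PID of the container's init process once it is running, so that Kill, Pause, and exec-style operations can read it back later. As a minimal, Linux-flavored sketch (illustrative only, not the actual Docker code), a driver's Kill could consume it like this:

// Hypothetical sketch: how a driver might use the ContainerPid that
// Run() recorded. The signal plumbing here is illustrative.
func (d *driver) Kill(c *execdriver.Command, sig int) error {
	if c.ContainerPid == 0 {
		return fmt.Errorf("container %s has no recorded pid", c.ID)
	}
	return syscall.Kill(c.ContainerPid, syscall.Signal(sig))
}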
Example 1: Run
//......... part of the code omitted here .........
		// Listen on the named pipe
		inListen, err = npipe.Listen(stdInPipe)
		if err != nil {
			logrus.Errorf("stdin failed to listen on %s err=%s", stdInPipe, err)
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		defer inListen.Close()
		// Launch a goroutine to do the accept. We do this so that we can
		// cause an otherwise blocking goroutine to gracefully close when
		// the caller (us) closes the listener
		go stdinAccept(inListen, stdInPipe, pipes.Stdin)
	}
	// Connect stdout
	stdOutPipe := fmt.Sprintf(serverPipeFormat, c.ID, "stdout")
	createProcessParms.StdOutPipe = fmt.Sprintf(clientPipeFormat, c.ID, "stdout")
	outListen, err = npipe.Listen(stdOutPipe)
	if err != nil {
		logrus.Errorf("stdout failed to listen on %s err=%s", stdOutPipe, err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer outListen.Close()
	go stdouterrAccept(outListen, stdOutPipe, pipes.Stdout)
	// No stderr on TTY.
	if !c.ProcessConfig.Tty {
		// Connect stderr
		stdErrPipe := fmt.Sprintf(serverPipeFormat, c.ID, "stderr")
		createProcessParms.StdErrPipe = fmt.Sprintf(clientPipeFormat, c.ID, "stderr")
		errListen, err = npipe.Listen(stdErrPipe)
		if err != nil {
			logrus.Errorf("stderr failed to listen on %s err=%s", stdErrPipe, err)
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		defer errListen.Close()
		go stdouterrAccept(errListen, stdErrPipe, pipes.Stderr)
	}
	// This should get caught earlier, but just in case - validate that we
	// have something to run
	if c.ProcessConfig.Entrypoint == "" {
		err = errors.New("No entrypoint specified")
		logrus.Error(err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	// Build the command line of the process
	createProcessParms.CommandLine = c.ProcessConfig.Entrypoint
	for _, arg := range c.ProcessConfig.Arguments {
		logrus.Debugln("appending ", arg)
		createProcessParms.CommandLine += " " + arg
	}
	logrus.Debugf("CommandLine: %s", createProcessParms.CommandLine)
	// Start the command running in the container.
	var pid uint32
	pid, err = hcsshim.CreateProcessInComputeSystem(c.ID, createProcessParms)
	if err != nil {
		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	// Save the PID as we'll need this in Kill()
	logrus.Debugf("PID %d", pid)
	c.ContainerPid = int(pid)
	if c.ProcessConfig.Tty {
		term = NewTtyConsole(c.ID, pid)
	} else {
		term = NewStdConsole()
	}
	c.ProcessConfig.Terminal = term
	// Maintain our list of active containers. We'll need this later for exec
	// and other commands.
	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		command: c,
	}
	d.Unlock()
	// Invoke the start callback
	if startCallback != nil {
		startCallback(&c.ProcessConfig, int(pid))
	}
	var exitCode int32
	exitCode, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid)
	if err != nil {
		logrus.Errorf("Failed to WaitForProcessInComputeSystem %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	logrus.Debugf("Exiting Run() exitCode %d id=%s", exitCode, c.ID)
	return execdriver.ExitStatus{ExitCode: int(exitCode)}, nil
}
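The helpers stdinAccept and stdouterrAccept are outside this excerpt. Given the comment about unblocking Accept by closing the listener, a plausible shape for the stdin side is sketched below (the name, signature, and logging are assumptions, not the actual Docker helper):

// Hypothetical sketch of the accept helper used above. The blocking
// Accept() returns with an error as soon as the caller closes the
// listener, letting the goroutine exit gracefully.
func stdinAccept(l net.Listener, pipeName string, copyFrom io.Reader) {
	conn, err := l.Accept()
	if err != nil {
		logrus.Debugf("accept on %s ended: %s", pipeName, err)
		return
	}
	defer conn.Close()
	// Pump the daemon-side stdin stream into the container's pipe.
	io.Copy(conn, copyFrom)
}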
Example 2: Run
//......... part of the code omitted here .........
"mount --make-rslave /; exec " +
utils.ShellQuoteArguments(params)
params = []string{
"unshare", "-m", "--", "/bin/sh", "-c", shellString,
}
}
logrus.Debugf("lxc params %s", params)
var (
name = params[0]
arg = params[1:]
)
aname, err := exec.LookPath(name)
if err != nil {
aname = name
}
c.ProcessConfig.Path = aname
c.ProcessConfig.Args = append([]string{name}, arg...)
if err := createDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil {
return execdriver.ExitStatus{ExitCode: -1}, err
}
if err := c.ProcessConfig.Start(); err != nil {
return execdriver.ExitStatus{ExitCode: -1}, err
}
var (
waitErr error
waitLock = make(chan struct{})
)
go func() {
if err := c.ProcessConfig.Wait(); err != nil {
if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0
waitErr = err
}
}
close(waitLock)
}()
terminate := func(terr error) (execdriver.ExitStatus, error) {
if c.ProcessConfig.Process != nil {
c.ProcessConfig.Process.Kill()
c.ProcessConfig.Wait()
}
return execdriver.ExitStatus{ExitCode: -1}, terr
}
// Poll lxc for RUNNING status
pid, err := d.waitForStart(c, waitLock)
if err != nil {
return terminate(err)
}
cgroupPaths, err := cgroupPaths(c.ID)
if err != nil {
return terminate(err)
}
state := &libcontainer.State{
InitProcessPid: pid,
CgroupPaths: cgroupPaths,
}
f, err := os.Create(filepath.Join(dataPath, "state.json"))
if err != nil {
return terminate(err)
}
defer f.Close()
if err := json.NewEncoder(f).Encode(state); err != nil {
return terminate(err)
}
c.ContainerPid = pid
if startCallback != nil {
logrus.Debugf("Invoking startCallback")
startCallback(&c.ProcessConfig, pid)
}
oomKill := false
oomKillNotification, err := notifyOnOOM(cgroupPaths)
<-waitLock
if err == nil {
_, oomKill = <-oomKillNotification
logrus.Debugf("oomKill error %s waitErr %s", oomKill, waitErr)
} else {
logrus.Warnf("Your kernel does not support OOM notifications: %s", err)
}
// check oom error
exitCode := getExitCode(c)
if oomKill {
exitCode = 137
}
return execdriver.ExitStatus{ExitCode: exitCode, OOMKilled: oomKill}, waitErr
}
Example 3: Run
//......... part of the code omitted here .........
	params = append(params,
		"-mtu", strconv.Itoa(c.Network.Mtu),
	)
	if c.ProcessConfig.User != "" {
		params = append(params, "-u", c.ProcessConfig.User)
	}
	if c.ProcessConfig.Privileged {
		if d.apparmor {
			params[0] = path.Join(d.root, "lxc-start-unconfined")
		}
		params = append(params, "-privileged")
	}
	if c.WorkingDir != "" {
		params = append(params, "-w", c.WorkingDir)
	}
	if len(c.CapAdd) > 0 {
		params = append(params, fmt.Sprintf("-cap-add=%s", strings.Join(c.CapAdd, ":")))
	}
	if len(c.CapDrop) > 0 {
		params = append(params, fmt.Sprintf("-cap-drop=%s", strings.Join(c.CapDrop, ":")))
	}
	params = append(params, "--", c.ProcessConfig.Entrypoint)
	params = append(params, c.ProcessConfig.Arguments...)
	if d.sharedRoot {
		// lxc-start really needs / to be non-shared, or all kinds of stuff break
		// when lxc-start unmounts things and those unmounts propagate to the main
		// mount namespace.
		// What we really want is to clone into a new namespace and then
		// mount / MS_REC|MS_SLAVE, but since we can't really clone or fork
		// without exec in go we have to do this horrible shell hack...
		shellString :=
			"mount --make-rslave /; exec " +
				utils.ShellQuoteArguments(params)
		params = []string{
			"unshare", "-m", "--", "/bin/sh", "-c", shellString,
		}
	}
	var (
		name = params[0]
		arg  = params[1:]
	)
	aname, err := exec.LookPath(name)
	if err != nil {
		aname = name
	}
	c.ProcessConfig.Path = aname
	c.ProcessConfig.Args = append([]string{name}, arg...)
	if err := nodes.CreateDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil {
		return -1, err
	}
	if err := c.ProcessConfig.Start(); err != nil {
		return -1, err
	}
	var (
		waitErr  error
		waitLock = make(chan struct{})
	)
	go func() {
		if err := c.ProcessConfig.Wait(); err != nil {
			if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0
				waitErr = err
			}
		}
		close(waitLock)
	}()
	// Poll lxc for RUNNING status
	pid, err := d.waitForStart(c, waitLock)
	if err != nil {
		if c.ProcessConfig.Process != nil {
			c.ProcessConfig.Process.Kill()
			c.ProcessConfig.Wait()
		}
		return -1, err
	}
	c.ContainerPid = pid
	if startCallback != nil {
		startCallback(&c.ProcessConfig, pid)
	}
	<-waitLock
	return getExitCode(c), waitErr
}
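utils.ShellQuoteArguments does the heavy lifting in the sharedRoot branch: every argument must survive being re-parsed by /bin/sh -c. A sketch of that kind of POSIX quoting (a plausible equivalent, not the exact helper):

// Hypothetical sketch in the spirit of utils.ShellQuoteArguments:
// single-quote each argument and escape any embedded single quotes so
// "/bin/sh -c" re-parses them verbatim.
func shellQuoteArguments(args []string) string {
	quoted := make([]string, len(args))
	for i, a := range args {
		quoted[i] = "'" + strings.Replace(a, "'", `'\''`, -1) + "'"
	}
	return strings.Join(quoted, " ")
}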
Example 4: Run
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
	// take the Command and populate the libcontainer.Config from it
	container, err := d.createContainer(c)
	if err != nil {
		return -1, err
	}
	var term execdriver.Terminal
	if c.ProcessConfig.Tty {
		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
	}
	if err != nil {
		return -1, err
	}
	c.ProcessConfig.Terminal = term
	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		container: container,
		cmd:       &c.ProcessConfig.Cmd,
	}
	d.Unlock()
	var (
		dataPath = filepath.Join(d.root, c.ID)
		args     = append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...)
	)
	if err := d.createContainerRoot(c.ID); err != nil {
		return -1, err
	}
	defer d.removeContainerRoot(c.ID)
	if err := d.writeContainerFile(container, c.ID); err != nil {
		return -1, err
	}
	return namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, c.Rootfs, dataPath, args, func(container *libcontainer.Config, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd {
		c.ProcessConfig.Path = d.initPath
		c.ProcessConfig.Args = append([]string{
			DriverName,
			"-console", console,
			"-pipe", "3",
			"-root", filepath.Join(d.root, c.ID),
			"--",
		}, args...)
		// set this to nil so that when we set the clone flags anything else is reset
		c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{
			Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)),
		}
		c.ProcessConfig.ExtraFiles = []*os.File{child}
		c.ProcessConfig.Env = container.Env
		c.ProcessConfig.Dir = c.Rootfs
		return &c.ProcessConfig.Cmd
	}, func() {
		if startCallback != nil {
			c.ContainerPid = c.ProcessConfig.Process.Pid
			startCallback(&c.ProcessConfig, c.ContainerPid)
		}
	})
}
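The SysProcAttr assignment in the callback is the crucial step: replacing the whole struct guarantees that only the namespace clone flags are set. The same mechanism in isolation looks like this (a standalone Linux sketch, not the driver code):

// Standalone sketch: start a child in fresh mount, UTS, and PID
// namespaces via Cloneflags (Linux only, requires root).
cmd := exec.Command("/bin/sh")
cmd.SysProcAttr = &syscall.SysProcAttr{
	Cloneflags: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWPID,
}
cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
if err := cmd.Run(); err != nil {
	log.Fatal(err)
}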
Example 5: Run
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
	// take the Command and populate the libcontainer.Config from it
	container, err := d.createContainer(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	var term execdriver.Terminal
	if c.ProcessConfig.Tty {
		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
	}
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	c.ProcessConfig.Terminal = term
	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		container: container,
		cmd:       &c.ProcessConfig.Cmd,
	}
	d.Unlock()
	var (
		dataPath = filepath.Join(d.root, c.ID)
		args     = append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...)
	)
	if err := d.createContainerRoot(c.ID); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer d.cleanContainer(c.ID)
	if err := d.writeContainerFile(container, c.ID); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	execOutputChan := make(chan execOutput, 1)
	waitForStart := make(chan struct{})
	go func() {
		exitCode, err := namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, dataPath, args, func(container *libcontainer.Config, console, dataPath, init string, child *os.File, args []string) *exec.Cmd {
			c.ProcessConfig.Path = d.initPath
			c.ProcessConfig.Args = append([]string{
				DriverName,
				"-console", console,
				"-pipe", "3",
				"-root", filepath.Join(d.root, c.ID),
				"--",
			}, args...)
			// set this to nil so that when we set the clone flags anything else is reset
			c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{
				Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)),
			}
			c.ProcessConfig.ExtraFiles = []*os.File{child}
			c.ProcessConfig.Env = container.Env
			c.ProcessConfig.Dir = container.RootFs
			return &c.ProcessConfig.Cmd
		}, func() {
			close(waitForStart)
			if startCallback != nil {
				c.ContainerPid = c.ProcessConfig.Process.Pid
				startCallback(&c.ProcessConfig, c.ContainerPid)
			}
		})
		execOutputChan <- execOutput{exitCode, err}
	}()
	select {
	case execOutput := <-execOutputChan:
		return execdriver.ExitStatus{ExitCode: execOutput.exitCode}, execOutput.err
	case <-waitForStart:
		break
	}
	oomKill := false
	state, err := libcontainer.GetState(filepath.Join(d.root, c.ID))
	if err == nil {
		oomKillNotification, err := libcontainer.NotifyOnOOM(state)
		if err == nil {
			_, oomKill = <-oomKillNotification
		} else {
			log.Warnf("WARNING: Your kernel does not support OOM notifications: %s", err)
		}
	} else {
		log.Warnf("Failed to get container state, oom notify will not work: %s", err)
	}
	// wait for the container to exit.
	execOutput := <-execOutputChan
	return execdriver.ExitStatus{ExitCode: execOutput.exitCode, OOMKilled: oomKill}, execOutput.err
}
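The select between execOutputChan and waitForStart is what lets this version return early when the process dies before it ever starts, which Example 4 could not do. Reduced to its essentials (hypothetical names, not the driver code):

// Hypothetical sketch of the start-vs-exit race used above: return as
// soon as the process exits (or fails) if that happens before it
// signals a successful start; otherwise keep waiting for the exit.
type result struct {
	code int
	err  error
}

func runAndWait(start func(started chan<- struct{}) (int, error)) (int, error) {
	done := make(chan result, 1)
	started := make(chan struct{})
	go func() {
		code, err := start(started)
		done <- result{code, err}
	}()
	select {
	case r := <-done:
		return r.code, r.err // exited before start completed
	case <-started:
	}
	r := <-done // started successfully; now wait for the exit
	return r.code, r.err
}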
Example 6: Run
//......... part of the code omitted here .........
	}
	// Start the container
	logrus.Debugln("Starting container ", c.ID)
	err = hcsshim.StartComputeSystem(c.ID)
	if err != nil {
		logrus.Errorf("Failed to start compute system: %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer func() {
		// Stop the container
		if forceKill {
			logrus.Debugf("Forcibly terminating container %s", c.ID)
			if errno, err := hcsshim.TerminateComputeSystem(c.ID, hcsshim.TimeoutInfinite, "exec-run-defer"); err != nil {
				logrus.Warnf("Ignoring error from TerminateComputeSystem 0x%X %s", errno, err)
			}
		} else {
			logrus.Debugf("Shutting down container %s", c.ID)
			if errno, err := hcsshim.ShutdownComputeSystem(c.ID, hcsshim.TimeoutInfinite, "exec-run-defer"); err != nil {
				if errno != hcsshim.Win32SystemShutdownIsInProgress &&
					errno != hcsshim.Win32SpecifiedPathInvalid &&
					errno != hcsshim.Win32SystemCannotFindThePathSpecified {
					logrus.Warnf("Ignoring error from ShutdownComputeSystem 0x%X %s", errno, err)
				}
			}
		}
	}()
	createProcessParms := hcsshim.CreateProcessParams{
		EmulateConsole:   c.ProcessConfig.Tty,
		WorkingDirectory: c.WorkingDir,
		ConsoleSize:      c.ProcessConfig.ConsoleSize,
	}
	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(c.ProcessConfig.Env)
	createProcessParms.CommandLine, err = createCommandLine(&c.ProcessConfig, c.ArgsEscaped)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	// Start the command running in the container.
	pid, stdin, stdout, stderr, _, err := hcsshim.CreateProcessInComputeSystem(c.ID, pipes.Stdin != nil, true, !c.ProcessConfig.Tty, createProcessParms)
	if err != nil {
		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	// Now that the process has been launched, begin copying data to and from
	// the named pipes for the std handles.
	setupPipes(stdin, stdout, stderr, pipes)
	// Save the PID as we'll need this in Kill()
	logrus.Debugf("PID %d", pid)
	c.ContainerPid = int(pid)
	if c.ProcessConfig.Tty {
		term = NewTtyConsole(c.ID, pid)
	} else {
		term = NewStdConsole()
	}
	c.ProcessConfig.Terminal = term
	// Maintain our list of active containers. We'll need this later for exec
	// and other commands.
	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		command: c,
	}
	d.Unlock()
	if hooks.Start != nil {
		// A closed channel for OOM is returned here as it will be
		// non-blocking and return the correct result when read.
		chOOM := make(chan struct{})
		close(chOOM)
		hooks.Start(&c.ProcessConfig, int(pid), chOOM)
	}
	var (
		exitCode int32
		errno    uint32
	)
	exitCode, errno, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid, hcsshim.TimeoutInfinite)
	if err != nil {
		if errno != hcsshim.Win32PipeHasBeenEnded {
			logrus.Warnf("WaitForProcessInComputeSystem failed (container may have been killed): %s", err)
		}
		// Do NOT return err here as the container would have
		// started, otherwise docker will deadlock. It's perfectly legitimate
		// for WaitForProcessInComputeSystem to fail in situations such
		// as the container being killed on another thread.
		return execdriver.ExitStatus{ExitCode: hcsshim.WaitErrExecFailed}, nil
	}
	logrus.Debugf("Exiting Run() exitCode %d id=%s", exitCode, c.ID)
	return execdriver.ExitStatus{ExitCode: int(exitCode)}, nil
}
Example 7: Run
//......... part of the code omitted here .........
		// Stop the container
		if forceKill {
			logrus.Debugf("Forcibly terminating container %s", c.ID)
			if errno, err := hcsshim.TerminateComputeSystem(c.ID, hcsshim.TimeoutInfinite, "exec-run-defer"); err != nil {
				logrus.Warnf("Ignoring error from TerminateComputeSystem 0x%X %s", errno, err)
			}
		} else {
			logrus.Debugf("Shutting down container %s", c.ID)
			if errno, err := hcsshim.ShutdownComputeSystem(c.ID, hcsshim.TimeoutInfinite, "exec-run-defer"); err != nil {
				if errno != hcsshim.Win32SystemShutdownIsInProgress &&
					errno != hcsshim.Win32SpecifiedPathInvalid &&
					errno != hcsshim.Win32SystemCannotFindThePathSpecified {
					logrus.Warnf("Ignoring error from ShutdownComputeSystem 0x%X %s", errno, err)
				}
			}
		}
	}()
	createProcessParms := hcsshim.CreateProcessParams{
		EmulateConsole:   c.ProcessConfig.Tty,
		WorkingDirectory: c.WorkingDir,
		ConsoleSize:      c.ProcessConfig.ConsoleSize,
	}
	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(c.ProcessConfig.Env)
	// This should get caught earlier, but just in case - validate that we
	// have something to run
	if c.ProcessConfig.Entrypoint == "" {
		err = errors.New("No entrypoint specified")
		logrus.Error(err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	// Build the command line of the process
	createProcessParms.CommandLine = c.ProcessConfig.Entrypoint
	for _, arg := range c.ProcessConfig.Arguments {
		logrus.Debugln("appending ", arg)
		createProcessParms.CommandLine += " " + syscall.EscapeArg(arg)
	}
	logrus.Debugf("CommandLine: %s", createProcessParms.CommandLine)
	// Start the command running in the container.
	pid, stdin, stdout, stderr, err := hcsshim.CreateProcessInComputeSystem(c.ID, pipes.Stdin != nil, true, !c.ProcessConfig.Tty, createProcessParms)
	if err != nil {
		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	// Now that the process has been launched, begin copying data to and from
	// the named pipes for the std handles.
	setupPipes(stdin, stdout, stderr, pipes)
	// Save the PID as we'll need this in Kill()
	logrus.Debugf("PID %d", pid)
	c.ContainerPid = int(pid)
	if c.ProcessConfig.Tty {
		term = NewTtyConsole(c.ID, pid)
	} else {
		term = NewStdConsole()
	}
	c.ProcessConfig.Terminal = term
	// Maintain our list of active containers. We'll need this later for exec
	// and other commands.
	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		command: c,
	}
	d.Unlock()
	if hooks.Start != nil {
		// A closed channel for OOM is returned here as it will be
		// non-blocking and return the correct result when read.
		chOOM := make(chan struct{})
		close(chOOM)
		hooks.Start(&c.ProcessConfig, int(pid), chOOM)
	}
	var (
		exitCode int32
		errno    uint32
	)
	exitCode, errno, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid, hcsshim.TimeoutInfinite)
	if err != nil {
		if errno != hcsshim.Win32PipeHasBeenEnded {
			logrus.Warnf("WaitForProcessInComputeSystem failed (container may have been killed): %s", err)
		}
		// Do NOT return err here as the container would have
		// started, otherwise docker will deadlock. It's perfectly legitimate
		// for WaitForProcessInComputeSystem to fail in situations such
		// as the container being killed on another thread.
		return execdriver.ExitStatus{ExitCode: hcsshim.WaitErrExecFailed}, nil
	}
	logrus.Debugf("Exiting Run() exitCode %d id=%s", exitCode, c.ID)
	return execdriver.ExitStatus{ExitCode: int(exitCode)}, nil
}
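Unlike the plain string concatenation in Example 1, this version escapes each argument with syscall.EscapeArg, which applies Windows command-line quoting rules. A small usage illustration (the input values are made up):

// Illustration: syscall.EscapeArg (Windows builds of the syscall
// package) quotes arguments containing spaces and escapes embedded quotes.
args := []string{`C:\Program Files\app.exe`, `say "hi"`}
cmdLine := syscall.EscapeArg(args[0])
for _, a := range args[1:] {
	cmdLine += " " + syscall.EscapeArg(a)
}
fmt.Println(cmdLine)
// "C:\Program Files\app.exe" "say \"hi\""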