This article collects typical usage examples of the Hooks type from the Golang package github.com/docker/docker/daemon/execdriver. If you are unsure what execdriver.Hooks does or how to use it, the curated code examples below may help.
A total of 12 code examples of the Hooks type are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better Golang code examples.
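Before the examples, here is a rough sketch of what the Hooks type and its callback signature might look like, inferred purely from how the examples below construct and invoke it. The field names, the PostStop field, and the exact callback signature are assumptions and differ between Docker versions; Example 3, for instance, uses an older two-argument callback without the OOM channel.

// Sketch of the execdriver package types, inferred from usage in the examples below.
package execdriver

// ProcessConfig is elided here; the real definition lives elsewhere in the package.
type ProcessConfig struct{ /* ... */ }

// DriverCallback receives the process config, the pid of the started process,
// and a channel that reports OOM events (often a pre-closed dummy channel).
type DriverCallback func(processConfig *ProcessConfig, pid int, chOOM <-chan struct{}) error

// Hooks bundles the callbacks a driver invokes around container start.
type Hooks struct {
	// PreStart callbacks run once the pid is known but before Start,
	// e.g. to set the network namespace key (Examples 2 and 3).
	PreStart []DriverCallback
	// Start runs after the container process has fully started.
	Start DriverCallback
	// PostStop callbacks would run after the process exits (assumed field).
	PostStop []DriverCallback
}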
Example 1: Exec
// Exec implements the exec driver Driver interface,
// it calls libcontainer APIs to execute a container.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
	}
	p := &libcontainer.Process{
		Args: append([]string{processConfig.Entrypoint}, processConfig.Arguments...),
		Env: c.ProcessConfig.Env,
		Cwd: c.WorkingDir,
		User: processConfig.User,
	}
	if processConfig.Privileged {
		p.Capabilities = execdriver.GetAllCapabilities()
	}
	// add CAP_ prefix to all caps for new libcontainer update to match
	// the spec format.
	for i, s := range p.Capabilities {
		if !strings.HasPrefix(s, "CAP_") {
			p.Capabilities[i] = fmt.Sprintf("CAP_%s", s)
		}
	}
	config := active.Config()
	if err := setupPipes(&config, processConfig, p, pipes); err != nil {
		return -1, err
	}
	if err := active.Start(p); err != nil {
		return -1, err
	}
	if hooks.Start != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return -1, err
		}
		// A closed channel for OOM is returned here as it will be
		// non-blocking and return the correct result when read.
		chOOM := make(chan struct{})
		close(chOOM)
		hooks.Start(&c.ProcessConfig, pid, chOOM)
	}
	ps, err := p.Wait()
	if err != nil {
		exitErr, ok := err.(*exec.ExitError)
		if !ok {
			return -1, err
		}
		ps = exitErr.ProcessState
	}
	return utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), nil
}
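Example 1 hands the Start hook a pre-closed OOM channel. The idiom works because a receive from a closed channel never blocks and immediately yields the zero value with ok == false. The minimal program below demonstrates the behaviour the driver relies on:

package main

import "fmt"

func main() {
	// Drivers that cannot deliver OOM notifications pass a channel that is
	// already closed: reading it never blocks and simply reports "no OOM".
	chOOM := make(chan struct{})
	close(chOOM)

	_, ok := <-chOOM
	fmt.Println("channel still open?", ok) // prints: channel still open? false
}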
Example 2: Run
// Run uses the execution driver to run a given container
func (daemon *Daemon) Run(c *container.Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) {
	hooks := execdriver.Hooks{
		Start: startCallback,
	}
	hooks.PreStart = append(hooks.PreStart, func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
		return daemon.setNetworkNamespaceKey(c.ID, pid)
	})
	return daemon.execDriver.Run(c.Command, pipes, hooks)
}
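Examples 2 and 3 only append to hooks.PreStart; they never show the consuming side, which presumably lives inside the driver (note the hooks argument passed to createContainer in Examples 5 and 8). A hypothetical helper illustrating how a driver could iterate the slice once the pid is known might look like the sketch below; runPreStartHooks is not part of the real execdriver API, and it assumes the three-argument callback signature used in Example 2.

package daemon

import "github.com/docker/docker/daemon/execdriver"

// runPreStartHooks is a hypothetical helper: it runs every PreStart callback
// in order and aborts the start if any of them fails.
func runPreStartHooks(hooks execdriver.Hooks, processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
	for _, hook := range hooks.PreStart {
		if err := hook(processConfig, pid, chOOM); err != nil {
			return err
		}
	}
	return nil
}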
Example 3: run
func (daemon *Daemon) run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) {
	hooks := execdriver.Hooks{
		Start: startCallback,
	}
	hooks.PreStart = append(hooks.PreStart, func(processConfig *execdriver.ProcessConfig, pid int) error {
		return c.setNetworkNamespaceKey(pid)
	})
	return daemon.execDriver.Run(c.command, pipes, hooks)
}
Example 4: Exec
// Exec implements the exec driver Driver interface,
// it calls libcontainer APIs to execute a container.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
	}
	p := &libcontainer.Process{
		Args: append([]string{processConfig.Entrypoint}, processConfig.Arguments...),
		Env: c.ProcessConfig.Env,
		Cwd: c.WorkingDir,
		User: processConfig.User,
	}
	if processConfig.Privileged {
		p.Capabilities = execdriver.GetAllCapabilities()
	}
	config := active.Config()
	if err := setupPipes(&config, processConfig, p, pipes); err != nil {
		return -1, err
	}
	if err := active.Start(p); err != nil {
		return -1, err
	}
	if hooks.Start != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return -1, err
		}
		hooks.Start(&c.ProcessConfig, pid)
	}
	ps, err := p.Wait()
	if err != nil {
		exitErr, ok := err.(*exec.ExitError)
		if !ok {
			return -1, err
		}
		ps = exitErr.ProcessState
	}
	return utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), nil
}
Example 5: Run
// Run implements the exec driver Driver interface,
// it calls libcontainer APIs to run a container.
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
	// take the Command and populate the libcontainer.Config from it
	container, err := d.createContainer(c, hooks)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	p := &libcontainer.Process{
		Args: append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...),
		Env: c.ProcessConfig.Env,
		Cwd: c.WorkingDir,
		User: c.ProcessConfig.User,
	}
	if err := setupPipes(container, &c.ProcessConfig, p, pipes); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	cont, err := d.factory.Create(c.ID, container)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	d.Lock()
	d.activeContainers[c.ID] = cont
	d.Unlock()
	defer func() {
		cont.Destroy()
		d.cleanContainer(c.ID)
	}()
	if err := cont.Start(p); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	oom := notifyOnOOM(cont)
	if hooks.Start != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		hooks.Start(&c.ProcessConfig, pid, oom)
	}
	waitF := p.Wait
	if nss := cont.Config().Namespaces; !nss.Contains(configs.NEWPID) {
		// we need this hack to track processes with inherited fds,
		// because cmd.Wait() waits for all streams to be copied
		waitF = waitInPIDHost(p, cont)
	}
	ps, err := waitF()
	if err != nil {
		execErr, ok := err.(*exec.ExitError)
		if !ok {
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		ps = execErr.ProcessState
	}
	cont.Destroy()
	_, oomKill := <-oom
	return execdriver.ExitStatus{ExitCode: utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), OOMKilled: oomKill}, nil
}
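Example 5 obtains its OOM channel from a notifyOnOOM helper rather than the pre-closed channel trick. The helper is not shown on this page; a plausible sketch, assuming libcontainer's Container.NotifyOOM API and the import paths of that era, is:

package native

import (
	"github.com/Sirupsen/logrus"
	"github.com/opencontainers/runc/libcontainer"
)

// notifyOnOOM returns a channel that signals OOM events for the container.
// If OOM notification cannot be set up, it falls back to the pre-closed
// channel idiom so callers never block. (Sketch; not the verbatim helper.)
func notifyOnOOM(container libcontainer.Container) <-chan struct{} {
	oom, err := container.NotifyOOM()
	if err != nil {
		logrus.Warnf("Your kernel does not support OOM notifications: %s", err)
		c := make(chan struct{})
		close(c)
		return c
	}
	return oom
}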
Example 6: Exec
// Exec implements the exec driver Driver interface.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
	var (
		term execdriver.Terminal
		err error
		exitCode int32
		errno uint32
	)
	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("Exec - No active container exists with ID %s", c.ID)
	}
	createProcessParms := hcsshim.CreateProcessParams{
		EmulateConsole: processConfig.Tty, // Note NOT c.ProcessConfig.Tty
		WorkingDirectory: c.WorkingDir,
	}
	// Configure the environment for the process // Note NOT c.ProcessConfig.Env
	createProcessParms.Environment = setupEnvironmentVariables(processConfig.Env)
	// Create the commandline for the process // Note NOT c.ProcessConfig
	createProcessParms.CommandLine, err = createCommandLine(processConfig, false)
	if err != nil {
		return -1, err
	}
	// Start the command running in the container.
	pid, stdin, stdout, stderr, rc, err := hcsshim.CreateProcessInComputeSystem(c.ID, pipes.Stdin != nil, true, !processConfig.Tty, createProcessParms)
	if err != nil {
		// TODO Windows: TP4 Workaround. In Hyper-V containers, there is a limitation
		// of one exec per container. This should be fixed post TP4. CreateProcessInComputeSystem
		// will return a specific error which we handle here to give a good error message
		// back to the user instead of an inactionable "An invalid argument was supplied"
		if rc == hcsshim.Win32InvalidArgument {
			return -1, fmt.Errorf("The limit of docker execs per Hyper-V container has been exceeded")
		}
		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
		return -1, err
	}
	// Now that the process has been launched, begin copying data to and from
	// the named pipes for the std handles.
	setupPipes(stdin, stdout, stderr, pipes)
	// Note NOT c.ProcessConfig.Tty
	if processConfig.Tty {
		term = NewTtyConsole(c.ID, pid)
	} else {
		term = NewStdConsole()
	}
	processConfig.Terminal = term
	// Invoke the start callback
	if hooks.Start != nil {
		// A closed channel for OOM is returned here as it will be
		// non-blocking and return the correct result when read.
		chOOM := make(chan struct{})
		close(chOOM)
		hooks.Start(&c.ProcessConfig, int(pid), chOOM)
	}
	if exitCode, errno, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid, hcsshim.TimeoutInfinite); err != nil {
		if errno == hcsshim.Win32PipeHasBeenEnded {
			logrus.Debugf("Exiting Run() after WaitForProcessInComputeSystem failed with recognised error 0x%X", errno)
			return hcsshim.WaitErrExecFailed, nil
		}
		logrus.Warnf("WaitForProcessInComputeSystem failed (container may have been killed): 0x%X %s", errno, err)
		return -1, err
	}
	logrus.Debugln("Exiting Run()", c.ID)
	return int(exitCode), nil
}
Example 7: Run
// Run implements the exec driver Driver interface
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
	var (
		term execdriver.Terminal
		err error
	)
	cu := &containerInit{
		SystemType: "Container",
		Name: c.ID,
		Owner: defaultOwner,
		IsDummy: dummyMode,
		VolumePath: c.Rootfs,
		IgnoreFlushesDuringBoot: c.FirstStart,
		LayerFolderPath: c.LayerFolder,
		ProcessorWeight: c.Resources.CPUShares,
		HostName: c.Hostname,
	}
	cu.HvPartition = c.HvPartition
	if cu.HvPartition {
		cu.SandboxPath = filepath.Dir(c.LayerFolder)
	} else {
		cu.VolumePath = c.Rootfs
		cu.LayerFolderPath = c.LayerFolder
	}
	for _, layerPath := range c.LayerPaths {
		_, filename := filepath.Split(layerPath)
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		cu.Layers = append(cu.Layers, layer{
			ID: g.ToString(),
			Path: layerPath,
		})
	}
	// Add the mounts (volumes, bind mounts etc) to the structure
	mds := make([]mappedDir, len(c.Mounts))
	for i, mount := range c.Mounts {
		mds[i] = mappedDir{
			HostPath: mount.Source,
			ContainerPath: mount.Destination,
			ReadOnly: !mount.Writable}
	}
	cu.MappedDirectories = mds
	// TODO Windows. At some point, when there is CLI on docker run to
	// enable the IP Address of the container to be passed into docker run,
	// the IP Address needs to be wired through to HCS in the JSON. It
	// would be present in c.Network.Interface.IPAddress. See matching
	// TODO in daemon\container_windows.go, function populateCommand.
	if c.Network.Interface != nil {
		var pbs []portBinding
		// Enumerate through the port bindings specified by the user and convert
		// them into the internal structure matching the JSON blob that can be
		// understood by the HCS.
		for i, v := range c.Network.Interface.PortBindings {
			proto := strings.ToUpper(i.Proto())
			if proto != "TCP" && proto != "UDP" {
				return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid protocol %s", i.Proto())
			}
			if len(v) > 1 {
				return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support more than one host port in NAT settings")
			}
			for _, v2 := range v {
				var (
					iPort, ePort int
					err error
				)
				if len(v2.HostIP) != 0 {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support host IP addresses in NAT settings")
				}
				if ePort, err = strconv.Atoi(v2.HostPort); err != nil {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid container port %s: %s", v2.HostPort, err)
				}
				if iPort, err = strconv.Atoi(i.Port()); err != nil {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid internal port %s: %s", i.Port(), err)
				}
				if iPort < 0 || iPort > 65535 || ePort < 0 || ePort > 65535 {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("specified NAT port is not in allowed range")
				}
				pbs = append(pbs,
					portBinding{ExternalPort: ePort,
						InternalPort: iPort,
						Protocol: proto})
			}
		}
		// TODO Windows: TP3 workaround. Allow the user to override the name of
		// the Container NAT device through an environment variable. This will
//......... part of the code omitted here .........
Example 8: Run
// Run implements the exec driver Driver interface,
// it calls libcontainer APIs to run a container.
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
	destroyed := false
	var err error
	c.TmpDir, err = ioutil.TempDir("", c.ID)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer os.RemoveAll(c.TmpDir)
	// take the Command and populate the libcontainer.Config from it
	container, err := d.createContainer(c, hooks)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	p := &libcontainer.Process{
		Args: append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...),
		Env: c.ProcessConfig.Env,
		Cwd: c.WorkingDir,
		User: c.ProcessConfig.User,
	}
	wg := sync.WaitGroup{}
	writers, err := setupPipes(container, &c.ProcessConfig, p, pipes, &wg)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	cont, err := d.factory.Create(c.ID, container)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	d.Lock()
	d.activeContainers[c.ID] = cont
	d.Unlock()
	defer func() {
		if !destroyed {
			cont.Destroy()
		}
		d.cleanContainer(c.ID)
	}()
	if err := cont.Start(p); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	//close the write end of any opened pipes now that they are dup'ed into the container
	for _, writer := range writers {
		writer.Close()
	}
	// 'oom' is used to emit 'oom' events to the eventstream, 'oomKilled' is used
	// to set the 'OOMKilled' flag in state
	oom := notifyOnOOM(cont)
	oomKilled := notifyOnOOM(cont)
	if hooks.Start != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		hooks.Start(&c.ProcessConfig, pid, oom)
	}
	waitF := p.Wait
	if nss := cont.Config().Namespaces; !nss.Contains(configs.NEWPID) {
		// we need this hack to track processes with inherited fds,
		// because cmd.Wait() waits for all streams to be copied
		waitF = waitInPIDHost(p, cont)
	}
	ps, err := waitF()
	if err != nil {
		execErr, ok := err.(*exec.ExitError)
		if !ok {
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		ps = execErr.ProcessState
	}
	// wait for all IO goroutine copiers to finish
	wg.Wait()
	cont.Destroy()
	destroyed = true
	// oomKilled will have an oom event if any process within the container was
	// OOM killed at any time, not only if the init process OOMed.
	//
	// Perhaps we only want the OOMKilled flag to be set if the OOM
	// resulted in a container death, but there isn't a good way to do this
	// because the kernel's cgroup oom notification does not provide information
	// such as the PID. This could be heuristically done by checking that the OOM
	// happened within some very small time slice for the container dying (and
	// optionally exit-code 137), but I don't think the cgroup oom notification
	// can be used to reliably determine this
	//
	// Even if there were multiple OOMs, it's sufficient to read one value
	// because libcontainer's oom notify will discard the channel after the
	// cgroup is destroyed
	_, oomKill := <-oomKilled
//......... part of the code omitted here .........
Example 9: Exec
// Exec implements the exec driver Driver interface.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
	var (
		term execdriver.Terminal
		err error
		exitCode int32
		errno uint32
	)
	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("Exec - No active container exists with ID %s", c.ID)
	}
	createProcessParms := hcsshim.CreateProcessParams{
		EmulateConsole: processConfig.Tty, // Note NOT c.ProcessConfig.Tty
		WorkingDirectory: c.WorkingDir,
	}
	// Configure the environment for the process // Note NOT c.ProcessConfig.Tty
	createProcessParms.Environment = setupEnvironmentVariables(processConfig.Env)
	// While this should get caught earlier, just in case, validate that we
	// have something to run.
	if processConfig.Entrypoint == "" {
		err = errors.New("No entrypoint specified")
		logrus.Error(err)
		return -1, err
	}
	// Build the command line of the process
	createProcessParms.CommandLine = processConfig.Entrypoint
	for _, arg := range processConfig.Arguments {
		logrus.Debugln("appending ", arg)
		createProcessParms.CommandLine += " " + arg
	}
	logrus.Debugln("commandLine: ", createProcessParms.CommandLine)
	// Start the command running in the container.
	pid, stdin, stdout, stderr, err := hcsshim.CreateProcessInComputeSystem(c.ID, pipes.Stdin != nil, true, !processConfig.Tty, createProcessParms)
	if err != nil {
		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
		return -1, err
	}
	// Now that the process has been launched, begin copying data to and from
	// the named pipes for the std handles.
	setupPipes(stdin, stdout, stderr, pipes)
	// Note NOT c.ProcessConfig.Tty
	if processConfig.Tty {
		term = NewTtyConsole(c.ID, pid)
	} else {
		term = NewStdConsole()
	}
	processConfig.Terminal = term
	// Invoke the start callback
	if hooks.Start != nil {
		// A closed channel for OOM is returned here as it will be
		// non-blocking and return the correct result when read.
		chOOM := make(chan struct{})
		close(chOOM)
		hooks.Start(&c.ProcessConfig, int(pid), chOOM)
	}
	if exitCode, errno, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid, hcsshim.TimeoutInfinite); err != nil {
		if errno == hcsshim.Win32PipeHasBeenEnded {
			logrus.Debugf("Exiting Run() after WaitForProcessInComputeSystem failed with recognised error 0x%X", errno)
			return hcsshim.WaitErrExecFailed, nil
		}
		logrus.Warnf("WaitForProcessInComputeSystem failed (container may have been killed): 0x%X %s", errno, err)
		return -1, err
	}
	logrus.Debugln("Exiting Run()", c.ID)
	return int(exitCode), nil
}
Example 10: Run
// Run implements the exec driver Driver interface
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
	var (
		term execdriver.Terminal
		err error
	)
	// Make sure the client isn't asking for options which aren't supported
	err = checkSupportedOptions(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	cu := &containerInit{
		SystemType: "Container",
		Name: c.ID,
		Owner: defaultOwner,
		IsDummy: dummyMode,
		VolumePath: c.Rootfs,
		IgnoreFlushesDuringBoot: c.FirstStart,
		LayerFolderPath: c.LayerFolder,
	}
	for i := 0; i < len(c.LayerPaths); i++ {
		_, filename := filepath.Split(c.LayerPaths[i])
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		cu.Layers = append(cu.Layers, layer{
			ID: g.ToString(),
			Path: c.LayerPaths[i],
		})
	}
	// TODO Windows. At some point, when there is CLI on docker run to
	// enable the IP Address of the container to be passed into docker run,
	// the IP Address needs to be wired through to HCS in the JSON. It
	// would be present in c.Network.Interface.IPAddress. See matching
	// TODO in daemon\container_windows.go, function populateCommand.
	if c.Network.Interface != nil {
		var pbs []portBinding
		// Enumerate through the port bindings specified by the user and convert
		// them into the internal structure matching the JSON blob that can be
		// understood by the HCS.
		for i, v := range c.Network.Interface.PortBindings {
			proto := strings.ToUpper(i.Proto())
			if proto != "TCP" && proto != "UDP" {
				return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid protocol %s", i.Proto())
			}
			if len(v) > 1 {
				return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support more than one host port in NAT settings")
			}
			for _, v2 := range v {
				var (
					iPort, ePort int
					err error
				)
				if len(v2.HostIP) != 0 {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support host IP addresses in NAT settings")
				}
				if ePort, err = strconv.Atoi(v2.HostPort); err != nil {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid container port %s: %s", v2.HostPort, err)
				}
				if iPort, err = strconv.Atoi(i.Port()); err != nil {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid internal port %s: %s", i.Port(), err)
				}
				if iPort < 0 || iPort > 65535 || ePort < 0 || ePort > 65535 {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("specified NAT port is not in allowed range")
				}
				pbs = append(pbs,
					portBinding{ExternalPort: ePort,
						InternalPort: iPort,
						Protocol: proto})
			}
		}
		// TODO Windows: TP3 workaround. Allow the user to override the name of
		// the Container NAT device through an environment variable. This will
		// ultimately be a global daemon parameter on Windows, similar to -b
		// for the name of the virtual switch (aka bridge).
		cn := os.Getenv("DOCKER_CONTAINER_NAT")
		if len(cn) == 0 {
			cn = defaultContainerNAT
		}
		dev := device{
			DeviceType: "Network",
			Connection: &networkConnection{
				NetworkName: c.Network.Interface.Bridge,
				// TODO Windows: Fixme, next line. Needs HCS fix.
				EnableNat: false,
				Nat: natSettings{
					Name: cn,
//......... part of the code omitted here .........
Example 11: Run
// Run implements the exec driver Driver interface,
// it calls 'exec.Cmd' to launch lxc commands to run a container.
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
	var (
		term execdriver.Terminal
		err error
		dataPath = d.containerDir(c.ID)
	)
	if c.Network == nil || (c.Network.NamespacePath == "" && c.Network.ContainerID == "") {
		return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("empty namespace path for non-container network")
	}
	container, err := d.createContainer(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	if c.ProcessConfig.Tty {
		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
	}
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	c.ProcessConfig.Terminal = term
	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		container: container,
		cmd: &c.ProcessConfig.Cmd,
	}
	d.Unlock()
	c.Mounts = append(c.Mounts, execdriver.Mount{
		Source: d.initPath,
		Destination: c.InitPath,
		Writable: false,
		Private: true,
	})
	if err := d.generateEnvConfig(c); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	configPath, err := d.generateLXCConfig(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	params := []string{
		"lxc-start",
		"-n", c.ID,
		"-f", configPath,
		"-q",
	}
	// From lxc>=1.1 the default behavior is to daemonize containers after start
	lxcVersion := version.Version(d.version())
	if lxcVersion.GreaterThanOrEqualTo(version.Version("1.1")) {
		params = append(params, "-F")
	}
	proc := &os.Process{}
	if c.Network.ContainerID != "" {
		params = append(params,
			"--share-net", c.Network.ContainerID,
		)
	} else {
		proc, err = setupNetNs(c.Network.NamespacePath)
		if err != nil {
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		pidStr := fmt.Sprintf("%d", proc.Pid)
		params = append(params,
			"--share-net", pidStr)
	}
	if c.Ipc != nil {
		if c.Ipc.ContainerID != "" {
			params = append(params,
				"--share-ipc", c.Ipc.ContainerID,
			)
		} else if c.Ipc.HostIpc {
			params = append(params,
				"--share-ipc", "1",
			)
		}
	}
	params = append(params,
		"--",
		c.InitPath,
	)
	if c.ProcessConfig.User != "" {
		params = append(params, "-u", c.ProcessConfig.User)
	}
	if c.ProcessConfig.Privileged {
		if d.apparmor {
//......... part of the code omitted here .........
Example 12: Exec
// Exec implements the exec driver Driver interface,
// it calls libcontainer APIs to execute a container.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
	}
	user := processConfig.User
	if c.RemappedRoot.UID != 0 && user == "" {
		//if user namespaces are enabled, set user explicitly so uid/gid is set to 0
		//otherwise we end up with the overflow id and no permissions (65534)
		user = "0"
	}
	p := &libcontainer.Process{
		Args: append([]string{processConfig.Entrypoint}, processConfig.Arguments...),
		Env: c.ProcessConfig.Env,
		Cwd: c.WorkingDir,
		User: user,
	}
	if processConfig.Privileged {
		p.Capabilities = execdriver.GetAllCapabilities()
	}
	// add CAP_ prefix to all caps for new libcontainer update to match
	// the spec format.
	for i, s := range p.Capabilities {
		if !strings.HasPrefix(s, "CAP_") {
			p.Capabilities[i] = fmt.Sprintf("CAP_%s", s)
		}
	}
	config := active.Config()
	wg := sync.WaitGroup{}
	writers, err := setupPipes(&config, processConfig, p, pipes, &wg)
	if err != nil {
		return -1, err
	}
	if err := active.Start(p); err != nil {
		return -1, err
	}
	//close the write end of any opened pipes now that they are dup'ed into the container
	for _, writer := range writers {
		writer.Close()
	}
	if hooks.Start != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return -1, err
		}
		// A closed channel for OOM is returned here as it will be
		// non-blocking and return the correct result when read.
		chOOM := make(chan struct{})
		close(chOOM)
		hooks.Start(&c.ProcessConfig, pid, chOOM)
	}
	ps, err := p.Wait()
	if err != nil {
		exitErr, ok := err.(*exec.ExitError)
		if !ok {
			return -1, err
		}
		ps = exitErr.ProcessState
	}
	// wait for all IO goroutine copiers to finish
	wg.Wait()
	return utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), nil
}
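Putting the pieces together, a caller of Exec would build a Hooks value the same way Example 2 does for Run. Below is a minimal, hypothetical wrapper showing how such a value could be handed to the driver; execWithLoggingHooks is illustrative only and not part of Docker's actual exec path, and it assumes the Driver interface and callback signature seen in the examples above.

package daemon

import (
	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon/execdriver"
)

// execWithLoggingHooks runs an exec'ed process with a Start hook that simply
// logs the pid; PreStart and PostStop are left empty.
func execWithLoggingHooks(driver execdriver.Driver, cmd *execdriver.Command, pc *execdriver.ProcessConfig, pipes *execdriver.Pipes) (int, error) {
	hooks := execdriver.Hooks{
		Start: func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
			logrus.Debugf("exec process started with pid %d", pid)
			return nil
		},
	}
	return driver.Exec(cmd, pc, pipes, hooks)
}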