This article collects typical usage examples of the Golang Command class from github.com/docker/docker/daemon/execdriver. If you are wondering what the Command class does and how to use it, the curated examples below should help.
The following presents 13 code examples of the Command class, sorted by popularity by default.
Example 1: NewTtyConsole
func NewTtyConsole(command *execdriver.Command, pipes *execdriver.Pipes) (*TtyConsole, error) {
    // lxc is special in that we cannot create the master outside of the container
    // without opening the slave, because we have nothing to provide to the cmd. We
    // have to open both, then do the crazy setup on the command right now instead
    // of passing the console path to lxc and telling it to open up that console.
    // We save a couple of open files in the native driver because we can do this.
    ptyMaster, ptySlave, err := pty.Open()
    if err != nil {
        return nil, err
    }
    tty := &TtyConsole{
        MasterPty: ptyMaster,
        SlavePty:  ptySlave,
    }
    if err := tty.AttachPipes(&command.Cmd, pipes); err != nil {
        tty.Close()
        return nil, err
    }
    command.Console = tty.SlavePty.Name()
    return tty, nil
}
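The AttachPipes body is not shown above, but the open-both-ends pattern the comment describes is easy to demonstrate standalone. Below is a minimal, runnable sketch using the same github.com/kr/pty package that provides pty.Open; everything else (the shell command, the relay) is illustrative and not Docker's code.

package main

import (
    "io"
    "os"
    "os/exec"

    "github.com/kr/pty" // provides pty.Open, as used by the driver above
)

func main() {
    master, slave, err := pty.Open()
    if err != nil {
        panic(err)
    }
    defer master.Close()

    // As in the driver: the child gets the slave end as its stdio...
    cmd := exec.Command("sh", "-c", "echo hello from a pty")
    cmd.Stdin, cmd.Stdout, cmd.Stderr = slave, slave, slave
    if err := cmd.Start(); err != nil {
        panic(err)
    }
    slave.Close() // the parent only needs the master end from here on

    // ...and the parent relays the master end to its own output, which is
    // roughly what AttachPipes does with the driver's pipes.
    io.Copy(os.Stdout, master) // returns once the child exits and the pty closes
    cmd.Wait()
}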
Example 2: generateEnvConfig
func (d *driver) generateEnvConfig(c *execdriver.Command) error {
    data, err := json.Marshal(c.Env)
    if err != nil {
        return err
    }
    p := path.Join(d.root, "containers", c.ID, "config.env")
    c.Mounts = append(c.Mounts, execdriver.Mount{
        Source:      p,
        Destination: "/.dockerenv",
        Writable:    false,
        Private:     true,
    })
    return ioutil.WriteFile(p, data, 0600)
}
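generateEnvConfig serializes the command's environment to JSON and bind-mounts the file into the container at /.dockerenv. For illustration, here is a hypothetical consumer: a small program that, run inside such a container, reads the file back. The file name matches the mount above and c.Env is a []string of KEY=VALUE pairs; the rest is an assumption for the demo.

package main

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
)

// loadDockerEnv reads the JSON environment written by generateEnvConfig above.
func loadDockerEnv() ([]string, error) {
    data, err := ioutil.ReadFile("/.dockerenv")
    if err != nil {
        return nil, err
    }
    var env []string // KEY=VALUE pairs, mirroring c.Env
    if err := json.Unmarshal(data, &env); err != nil {
        return nil, err
    }
    return env, nil
}

func main() {
    env, err := loadDockerEnv()
    if err != nil {
        fmt.Println("no env config:", err)
        return
    }
    for _, kv := range env {
        fmt.Println(kv)
    }
}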
Example 3: callback
// callback ensures that the container's state is properly updated after we
// receive an ack from the execution driver
func (m *containerMonitor) callback(command *execdriver.Command) {
    if command.Tty {
        // The callback is called after the process Start(),
        // so we are in the parent process. In TTY mode, stdin/out/err is the
        // PtySlave, which we close here.
        if c, ok := command.Stdout.(io.Closer); ok {
            c.Close()
        }
    }
    m.container.State.SetRunning(command.Pid())
    if m.startSignal != nil {
        // signal that the process has started
        close(m.startSignal)
        m.startSignal = nil
    }
    if err := m.container.ToDisk(); err != nil {
        log.Debugf("%s", err)
    }
}
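The close(m.startSignal) followed by m.startSignal = nil is a common Go idiom: closing a channel broadcasts to every waiter at once, and nilling the field keeps a second callback from closing the channel twice, which would panic. A self-contained sketch of that idiom (the mutex guards concurrent callers and is an addition for the demo, not in the original):

package main

import (
    "fmt"
    "sync"
)

type monitor struct {
    mu          sync.Mutex
    startSignal chan struct{}
}

// signalStarted mimics the callback above: close to broadcast, nil to make it idempotent.
func (m *monitor) signalStarted() {
    m.mu.Lock()
    defer m.mu.Unlock()
    if m.startSignal != nil {
        close(m.startSignal) // wakes every goroutine blocked on <-startSignal
        m.startSignal = nil
    }
}

func main() {
    m := &monitor{startSignal: make(chan struct{})}
    ch := m.startSignal // waiters grab the channel before it is nilled
    done := make(chan struct{})
    go func() {
        <-ch // blocks until signalStarted closes the channel
        fmt.Println("container reported as running")
        close(done)
    }()
    m.signalStarted()
    m.signalStarted() // safe: the second call is a no-op
    <-done
}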
Example 4: generateEnvConfig
func (d *driver) generateEnvConfig(c *execdriver.Command) error {
    data, err := json.Marshal(c.ProcessConfig.Env)
    if err != nil {
        return err
    }
    p := path.Join(d.root, "containers", c.ID, "config.env")
    c.Mounts = append(c.Mounts, execdriver.Mount{
        Source:      p,
        Destination: "/.dockerenv",
        Writable:    false,
        Private:     true,
    })
    return ioutil.WriteFile(p, data, 0600)
}
Example 5: NewTtyConsole
func NewTtyConsole(command *execdriver.Command, pipes *execdriver.Pipes) (*TtyConsole, error) {
    ptyMaster, console, err := system.CreateMasterAndConsole()
    if err != nil {
        return nil, err
    }
    tty := &TtyConsole{
        MasterPty: ptyMaster,
    }
    if err := tty.AttachPipes(&command.Cmd, pipes); err != nil {
        tty.Close()
        return nil, err
    }
    command.Console = console
    return tty, nil
}
Example 6: Run
// Run implements the exec driver Driver interface
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
    var (
        term                           execdriver.Terminal
        err                            error
        inListen, outListen, errListen *npipe.PipeListener
    )
    // Make sure the client isn't asking for options which aren't supported
    err = checkSupportedOptions(c)
    if err != nil {
        return execdriver.ExitStatus{ExitCode: -1}, err
    }
    cu := &containerInit{
        SystemType:              "Container",
        Name:                    c.ID,
        IsDummy:                 dummyMode,
        VolumePath:              c.Rootfs,
        IgnoreFlushesDuringBoot: c.FirstStart,
        LayerFolderPath:         c.LayerFolder,
    }
    for i := 0; i < len(c.LayerPaths); i++ {
        cu.Layers = append(cu.Layers, layer{
            ID:   hcsshim.NewGUID(c.LayerPaths[i]).ToString(),
            Path: c.LayerPaths[i],
        })
    }
    // TODO Windows. At some point, when there is CLI on docker run to
    // enable the IP Address of the container to be passed into docker run,
    // the IP Address needs to be wired through to HCS in the JSON. It
    // would be present in c.Network.Interface.IPAddress. See matching
    // TODO in daemon\container_windows.go, function populateCommand.
    if c.Network.Interface != nil {
        var pbs []portBinding
        // Enumerate through the port bindings specified by the user and convert
        // them into the internal structure matching the JSON blob that can be
        // understood by the HCS.
        for i, v := range c.Network.Interface.PortBindings {
            proto := strings.ToUpper(i.Proto())
            if proto != "TCP" && proto != "UDP" {
                return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid protocol %s", i.Proto())
            }
            if len(v) > 1 {
                return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support more than one host port in NAT settings")
            }
            for _, v2 := range v {
                var (
                    iPort, ePort int
                    err          error
                )
                if len(v2.HostIP) != 0 {
                    return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support host IP addresses in NAT settings")
                }
                if ePort, err = strconv.Atoi(v2.HostPort); err != nil {
                    return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid container port %s: %s", v2.HostPort, err)
                }
                if iPort, err = strconv.Atoi(i.Port()); err != nil {
                    return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid internal port %s: %s", i.Port(), err)
                }
                if iPort < 0 || iPort > 65535 || ePort < 0 || ePort > 65535 {
                    return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("specified NAT port is not in allowed range")
                }
                pbs = append(pbs, portBinding{
                    ExternalPort: ePort,
                    InternalPort: iPort,
                    Protocol:     proto,
                })
            }
        }
        // TODO Windows: TP3 workaround. Allow the user to override the name of
        // the Container NAT device through an environment variable. This will
        // ultimately be a global daemon parameter on Windows, similar to -b
        // for the name of the virtual switch (aka bridge).
        cn := os.Getenv("DOCKER_CONTAINER_NAT")
        if len(cn) == 0 {
            cn = defaultContainerNAT
        }
        dev := device{
            DeviceType: "Network",
            Connection: &networkConnection{
                NetworkName: c.Network.Interface.Bridge,
                // TODO Windows: Fixme, next line. Needs HCS fix.
                EnableNat: false,
                Nat: natSettings{
                    Name:         cn,
                    PortBindings: pbs,
                },
            },
        }
// ... (the rest of this example is omitted) ...
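The port-binding loop above validates and converts Docker's nat-style bindings into the HCS shape. The same checks can be shown in isolation. In the sketch below, the portBinding type only mirrors the driver's own struct, and the real code iterates a map keyed by nat.Port rather than parsing a string, so treat this as an illustrative stand-in:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// Mirror type for illustration only; the real driver defines its own portBinding.
type portBinding struct {
    ExternalPort, InternalPort int
    Protocol                   string
}

// convert turns a "8080/tcp" spec plus a host port into the HCS shape, applying
// the same checks as the driver: TCP/UDP only, numeric ports, range 0-65535.
func convert(spec, hostPort string) (portBinding, error) {
    parts := strings.SplitN(spec, "/", 2)
    if len(parts) != 2 {
        return portBinding{}, fmt.Errorf("invalid port spec %s", spec)
    }
    proto := strings.ToUpper(parts[1])
    if proto != "TCP" && proto != "UDP" {
        return portBinding{}, fmt.Errorf("invalid protocol %s", parts[1])
    }
    iPort, err := strconv.Atoi(parts[0])
    if err != nil {
        return portBinding{}, fmt.Errorf("invalid internal port %s: %s", parts[0], err)
    }
    ePort, err := strconv.Atoi(hostPort)
    if err != nil {
        return portBinding{}, fmt.Errorf("invalid container port %s: %s", hostPort, err)
    }
    if iPort < 0 || iPort > 65535 || ePort < 0 || ePort > 65535 {
        return portBinding{}, fmt.Errorf("specified NAT port is not in allowed range")
    }
    return portBinding{ExternalPort: ePort, InternalPort: iPort, Protocol: proto}, nil
}

func main() {
    pb, err := convert("8080/tcp", "80")
    fmt.Println(pb, err) // {80 8080 TCP} <nil>
}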
Example 7: Run
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
    var (
        term execdriver.Terminal
        err  error
    )
    if c.ProcessConfig.Tty {
        term, err = NewTtyConsole(&c.ProcessConfig, pipes)
    } else {
        term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
    }
    if err != nil {
        return -1, err
    }
    c.ProcessConfig.Terminal = term
    c.Mounts = append(c.Mounts, execdriver.Mount{
        Source:      d.initPath,
        Destination: c.InitPath,
        Writable:    false,
        Private:     true,
    })
    if err := d.generateEnvConfig(c); err != nil {
        return -1, err
    }
    configPath, err := d.generateLXCConfig(c)
    if err != nil {
        return -1, err
    }
    params := []string{
        "lxc-start",
        "-n", c.ID,
        "-f", configPath,
        "--",
        c.InitPath,
    }
    if c.Network.Interface != nil {
        params = append(params,
            "-g", c.Network.Interface.Gateway,
            "-i", fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
        )
    }
    params = append(params,
        "-mtu", strconv.Itoa(c.Network.Mtu),
    )
    if c.ProcessConfig.User != "" {
        params = append(params, "-u", c.ProcessConfig.User)
    }
    if c.ProcessConfig.Privileged {
        if d.apparmor {
            params[0] = path.Join(d.root, "lxc-start-unconfined")
        }
        params = append(params, "-privileged")
    }
    if c.WorkingDir != "" {
        params = append(params, "-w", c.WorkingDir)
    }
    if len(c.CapAdd) > 0 {
        params = append(params, fmt.Sprintf("-cap-add=%s", strings.Join(c.CapAdd, ":")))
    }
    if len(c.CapDrop) > 0 {
        params = append(params, fmt.Sprintf("-cap-drop=%s", strings.Join(c.CapDrop, ":")))
    }
    params = append(params, "--", c.ProcessConfig.Entrypoint)
    params = append(params, c.ProcessConfig.Arguments...)
    if d.sharedRoot {
        // lxc-start really needs / to be non-shared, or all kinds of stuff
        // breaks when lxc-start unmounts things and those unmounts propagate
        // to the main mount namespace.
        // What we really want is to clone into a new namespace and then
        // mount / MS_REC|MS_SLAVE, but since we can't really clone or fork
        // without exec in Go we have to do this horrible shell hack...
        shellString :=
            "mount --make-rslave /; exec " +
                utils.ShellQuoteArguments(params)
        params = []string{
            "unshare", "-m", "--", "/bin/sh", "-c", shellString,
        }
    }
    var (
        name = params[0]
        arg  = params[1:]
    )
    aname, err := exec.LookPath(name)
    if err != nil {
        aname = name
    }
    c.ProcessConfig.Path = aname
    c.ProcessConfig.Args = append([]string{name}, arg...)
    if err := nodes.CreateDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil {
// ... (the rest of this example is omitted) ...
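The sharedRoot branch is worth isolating: because a Go program cannot fork without exec, the driver wraps the real command in unshare -m plus a shell that first remounts / as a recursive slave, so lxc-start's unmounts cannot leak into the host namespace. A standalone sketch of building that wrapper; quoteArgs stands in for utils.ShellQuoteArguments, and the program only prints the argv rather than running it, since actually executing it requires root:

package main

import (
    "fmt"
    "os/exec"
    "strings"
)

// quoteArgs single-quotes each argument so the shell passes it through
// verbatim; an illustrative stand-in for utils.ShellQuoteArguments.
func quoteArgs(args []string) string {
    quoted := make([]string, len(args))
    for i, a := range args {
        quoted[i] = "'" + strings.Replace(a, "'", `'\''`, -1) + "'"
    }
    return strings.Join(quoted, " ")
}

func main() {
    params := []string{"lxc-start", "-n", "some-id", "-f", "/tmp/config"}
    // Same trick as the driver: make / a recursive slave mount inside a new
    // mount namespace, then exec the real command.
    shellString := "mount --make-rslave /; exec " + quoteArgs(params)
    cmd := exec.Command("unshare", "-m", "--", "/bin/sh", "-c", shellString)
    fmt.Println(cmd.Args) // the argv that would actually be executed
}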
Example 8: Run
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
    var (
        term     execdriver.Terminal
        err      error
        dataPath = d.containerDir(c.ID)
    )
    if c.ProcessConfig.Tty {
        term, err = NewTtyConsole(&c.ProcessConfig, pipes)
    } else {
        term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
    }
    if err != nil {
        return execdriver.ExitStatus{ExitCode: -1}, err
    }
    c.ProcessConfig.Terminal = term
    container, err := d.createContainer(c)
    if err != nil {
        return execdriver.ExitStatus{ExitCode: -1}, err
    }
    d.Lock()
    d.activeContainers[c.ID] = &activeContainer{
        container: container,
        cmd:       &c.ProcessConfig.Cmd,
    }
    d.Unlock()
    c.Mounts = append(c.Mounts, execdriver.Mount{
        Source:      d.initPath,
        Destination: c.InitPath,
        Writable:    false,
        Private:     true,
    })
    if err := d.generateEnvConfig(c); err != nil {
        return execdriver.ExitStatus{ExitCode: -1}, err
    }
    configPath, err := d.generateLXCConfig(c)
    if err != nil {
        return execdriver.ExitStatus{ExitCode: -1}, err
    }
    params := []string{
        "lxc-start",
        "-n", c.ID,
        "-f", configPath,
    }
    // From lxc >= 1.1 the default behavior is to daemonize containers after start
    lxcVersion := version.Version(d.version())
    if lxcVersion.GreaterThanOrEqualTo(version.Version("1.1")) {
        params = append(params, "-F")
    }
    if c.Network.ContainerID != "" {
        params = append(params,
            "--share-net", c.Network.ContainerID,
        )
    }
    if c.Ipc != nil {
        if c.Ipc.ContainerID != "" {
            params = append(params,
                "--share-ipc", c.Ipc.ContainerID,
            )
        } else if c.Ipc.HostIpc {
            params = append(params,
                "--share-ipc", "1",
            )
        }
    }
    params = append(params,
        "--",
        c.InitPath,
    )
    if c.Network.Interface != nil {
        params = append(params,
            "-g", c.Network.Interface.Gateway,
            "-i", fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
        )
    }
    params = append(params,
        "-mtu", strconv.Itoa(c.Network.Mtu),
    )
    if c.ProcessConfig.User != "" {
        params = append(params, "-u", c.ProcessConfig.User)
    }
    if c.ProcessConfig.Privileged {
        if d.apparmor {
            params[0] = path.Join(d.root, "lxc-start-unconfined")
        }
        params = append(params, "-privileged")
    }
    if c.WorkingDir != "" {
        params = append(params, "-w", c.WorkingDir)
    }
    params = append(params, "--", c.ProcessConfig.Entrypoint)
    params = append(params, c.ProcessConfig.Arguments...)
// ... (the rest of this example is omitted) ...
Example 9: Run
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
    // take the Command and populate the libcontainer.Config from it
    container, err := d.createContainer(c)
    if err != nil {
        return -1, err
    }
    var term execdriver.Terminal
    if c.ProcessConfig.Tty {
        term, err = NewTtyConsole(&c.ProcessConfig, pipes)
    } else {
        term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
    }
    if err != nil {
        return -1, err
    }
    c.ProcessConfig.Terminal = term
    d.Lock()
    d.activeContainers[c.ID] = &activeContainer{
        container: container,
        cmd:       &c.ProcessConfig.Cmd,
    }
    d.Unlock()
    var (
        dataPath = filepath.Join(d.root, c.ID)
        args     = append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...)
    )
    if err := d.createContainerRoot(c.ID); err != nil {
        return -1, err
    }
    defer d.removeContainerRoot(c.ID)
    if err := d.writeContainerFile(container, c.ID); err != nil {
        return -1, err
    }
    return namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, c.Rootfs, dataPath, args, func(container *libcontainer.Config, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd {
        c.ProcessConfig.Path = d.initPath
        c.ProcessConfig.Args = append([]string{
            DriverName,
            "-console", console,
            "-pipe", "3",
            "-root", filepath.Join(d.root, c.ID),
            "--",
        }, args...)
        // set this to nil so that when we set the clone flags anything else is reset
        c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{
            Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)),
        }
        c.ProcessConfig.ExtraFiles = []*os.File{child}
        c.ProcessConfig.Env = container.Env
        c.ProcessConfig.Dir = c.Rootfs
        return &c.ProcessConfig.Cmd
    }, func() {
        if startCallback != nil {
            c.ContainerPid = c.ProcessConfig.Process.Pid
            startCallback(&c.ProcessConfig, c.ContainerPid)
        }
    })
}
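The callback above re-execs the driver's init binary with "-pipe", "3" and hands it one pipe end through ExtraFiles; the kernel places ExtraFiles[0] at file descriptor 3, right after stdin/stdout/stderr. A runnable sketch of that handshake follows; the -child flag is invented for the demo and is not a Docker flag:

package main

import (
    "fmt"
    "io/ioutil"
    "os"
    "os/exec"
)

func main() {
    if len(os.Args) > 1 && os.Args[1] == "-child" {
        // In the child: ExtraFiles[0] always lands at fd 3.
        pipe := os.NewFile(3, "pipe")
        data, _ := ioutil.ReadAll(pipe)
        fmt.Printf("child received over fd 3: %s\n", data)
        return
    }
    r, w, err := os.Pipe()
    if err != nil {
        panic(err)
    }
    cmd := exec.Command(os.Args[0], "-child") // re-exec ourselves, like d.initPath
    cmd.Stdout = os.Stdout
    cmd.ExtraFiles = []*os.File{r} // becomes fd 3 in the child
    if err := cmd.Start(); err != nil {
        panic(err)
    }
    r.Close() // parent keeps only the write end
    w.Write([]byte("container config"))
    w.Close() // EOF lets the child's ReadAll return
    cmd.Wait()
}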
Example 10: Run
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
    // take the Command and populate the libcontainer.Config from it
    container, err := d.createContainer(c)
    if err != nil {
        return execdriver.ExitStatus{ExitCode: -1}, err
    }
    var term execdriver.Terminal
    if c.ProcessConfig.Tty {
        term, err = NewTtyConsole(&c.ProcessConfig, pipes)
    } else {
        term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
    }
    if err != nil {
        return execdriver.ExitStatus{ExitCode: -1}, err
    }
    c.ProcessConfig.Terminal = term
    d.Lock()
    d.activeContainers[c.ID] = &activeContainer{
        container: container,
        cmd:       &c.ProcessConfig.Cmd,
    }
    d.Unlock()
    var (
        dataPath = filepath.Join(d.root, c.ID)
        args     = append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...)
    )
    if err := d.createContainerRoot(c.ID); err != nil {
        return execdriver.ExitStatus{ExitCode: -1}, err
    }
    defer d.cleanContainer(c.ID)
    if err := d.writeContainerFile(container, c.ID); err != nil {
        return execdriver.ExitStatus{ExitCode: -1}, err
    }
    execOutputChan := make(chan execOutput, 1)
    waitForStart := make(chan struct{})
    go func() {
        exitCode, err := namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, dataPath, args, func(container *libcontainer.Config, console, dataPath, init string, child *os.File, args []string) *exec.Cmd {
            c.ProcessConfig.Path = d.initPath
            c.ProcessConfig.Args = append([]string{
                DriverName,
                "-console", console,
                "-pipe", "3",
                "-root", filepath.Join(d.root, c.ID),
                "--",
            }, args...)
            // set this to nil so that when we set the clone flags anything else is reset
            c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{
                Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)),
            }
            c.ProcessConfig.ExtraFiles = []*os.File{child}
            c.ProcessConfig.Env = container.Env
            c.ProcessConfig.Dir = container.RootFs
            return &c.ProcessConfig.Cmd
        }, func() {
            close(waitForStart)
            if startCallback != nil {
                c.ContainerPid = c.ProcessConfig.Process.Pid
                startCallback(&c.ProcessConfig, c.ContainerPid)
            }
        })
        execOutputChan <- execOutput{exitCode, err}
    }()
    select {
    case execOutput := <-execOutputChan:
        return execdriver.ExitStatus{ExitCode: execOutput.exitCode}, execOutput.err
    case <-waitForStart:
        break
    }
    oomKill := false
    state, err := libcontainer.GetState(filepath.Join(d.root, c.ID))
    if err == nil {
        oomKillNotification, err := libcontainer.NotifyOnOOM(state)
        if err == nil {
            _, oomKill = <-oomKillNotification
        } else {
            log.Warnf("WARNING: Your kernel does not support OOM notifications: %s", err)
        }
    } else {
        log.Warnf("Failed to get container state, OOM notify will not work: %s", err)
    }
    // wait for the container to exit
    execOutput := <-execOutputChan
    return execdriver.ExitStatus{ExitCode: execOutput.exitCode, OOMKilled: oomKill}, execOutput.err
}
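The channel choreography here is the interesting part: the driver selects between "the container exited before it ever started" and "it started", and only wires up the OOM notifier in the second case. A distilled, runnable version of that synchronization using plain channels (the sleep stands in for the container's lifetime; nothing libcontainer-specific remains):

package main

import (
    "fmt"
    "time"
)

type result struct {
    exitCode int
    err      error
}

func main() {
    resultChan := make(chan result, 1)
    waitForStart := make(chan struct{})

    go func() {
        close(waitForStart)               // the started-callback in the driver does this
        time.Sleep(50 * time.Millisecond) // stand-in for the running container
        resultChan <- result{exitCode: 0}
    }()

    select {
    case res := <-resultChan:
        // Startup failed fast: report immediately, as the driver does.
        fmt.Println("exited before start:", res.exitCode, res.err)
        return
    case <-waitForStart:
        // Started: this is where the driver subscribes to OOM notifications.
    }

    res := <-resultChan // wait for the container to exit
    fmt.Println("exit code:", res.exitCode)
}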
Example 11: Run
// Run implements the exec driver Driver interface
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
    var (
        term execdriver.Terminal
        err  error
    )
    cu := &containerInit{
        SystemType:              "Container",
        Name:                    c.ID,
        Owner:                   defaultOwner,
        IsDummy:                 dummyMode,
        VolumePath:              c.Rootfs,
        IgnoreFlushesDuringBoot: c.FirstStart,
        LayerFolderPath:         c.LayerFolder,
        ProcessorWeight:         c.Resources.CPUShares,
        HostName:                c.Hostname,
    }
    cu.HvPartition = c.HvPartition
    if cu.HvPartition {
        cu.SandboxPath = filepath.Dir(c.LayerFolder)
    } else {
        cu.VolumePath = c.Rootfs
        cu.LayerFolderPath = c.LayerFolder
    }
    for _, layerPath := range c.LayerPaths {
        _, filename := filepath.Split(layerPath)
        g, err := hcsshim.NameToGuid(filename)
        if err != nil {
            return execdriver.ExitStatus{ExitCode: -1}, err
        }
        cu.Layers = append(cu.Layers, layer{
            ID:   g.ToString(),
            Path: layerPath,
        })
    }
    // Add the mounts (volumes, bind mounts etc) to the structure
    mds := make([]mappedDir, len(c.Mounts))
    for i, mount := range c.Mounts {
        mds[i] = mappedDir{
            HostPath:      mount.Source,
            ContainerPath: mount.Destination,
            ReadOnly:      !mount.Writable,
        }
    }
    cu.MappedDirectories = mds
    // TODO Windows. At some point, when there is CLI on docker run to
    // enable the IP Address of the container to be passed into docker run,
    // the IP Address needs to be wired through to HCS in the JSON. It
    // would be present in c.Network.Interface.IPAddress. See matching
    // TODO in daemon\container_windows.go, function populateCommand.
    if c.Network.Interface != nil {
        var pbs []portBinding
        // Enumerate through the port bindings specified by the user and convert
        // them into the internal structure matching the JSON blob that can be
        // understood by the HCS.
        for i, v := range c.Network.Interface.PortBindings {
            proto := strings.ToUpper(i.Proto())
            if proto != "TCP" && proto != "UDP" {
                return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid protocol %s", i.Proto())
            }
            if len(v) > 1 {
                return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support more than one host port in NAT settings")
            }
            for _, v2 := range v {
                var (
                    iPort, ePort int
                    err          error
                )
                if len(v2.HostIP) != 0 {
                    return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support host IP addresses in NAT settings")
                }
                if ePort, err = strconv.Atoi(v2.HostPort); err != nil {
                    return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid container port %s: %s", v2.HostPort, err)
                }
                if iPort, err = strconv.Atoi(i.Port()); err != nil {
                    return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid internal port %s: %s", i.Port(), err)
                }
                if iPort < 0 || iPort > 65535 || ePort < 0 || ePort > 65535 {
                    return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("specified NAT port is not in allowed range")
                }
                pbs = append(pbs, portBinding{
                    ExternalPort: ePort,
                    InternalPort: iPort,
                    Protocol:     proto,
                })
            }
        }
        // TODO Windows: TP3 workaround. Allow the user to override the name of
        // the Container NAT device through an environment variable. This will
// ... (the rest of this example is omitted) ...
Example 12: Run
// Run implements the exec driver Driver interface
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
    var (
        term execdriver.Terminal
        err  error
    )
    // Make sure the client isn't asking for options which aren't supported
    err = checkSupportedOptions(c)
    if err != nil {
        return execdriver.ExitStatus{ExitCode: -1}, err
    }
    cu := &containerInit{
        SystemType:              "Container",
        Name:                    c.ID,
        Owner:                   defaultOwner,
        IsDummy:                 dummyMode,
        VolumePath:              c.Rootfs,
        IgnoreFlushesDuringBoot: c.FirstStart,
        LayerFolderPath:         c.LayerFolder,
        ProcessorWeight:         c.Resources.CPUShares,
        HostName:                c.Hostname,
    }
    for i := 0; i < len(c.LayerPaths); i++ {
        _, filename := filepath.Split(c.LayerPaths[i])
        g, err := hcsshim.NameToGuid(filename)
        if err != nil {
            return execdriver.ExitStatus{ExitCode: -1}, err
        }
        cu.Layers = append(cu.Layers, layer{
            ID:   g.ToString(),
            Path: c.LayerPaths[i],
        })
    }
    // TODO Windows. At some point, when there is CLI on docker run to
    // enable the IP Address of the container to be passed into docker run,
    // the IP Address needs to be wired through to HCS in the JSON. It
    // would be present in c.Network.Interface.IPAddress. See matching
    // TODO in daemon\container_windows.go, function populateCommand.
    if c.Network.Interface != nil {
        var pbs []portBinding
        // Enumerate through the port bindings specified by the user and convert
        // them into the internal structure matching the JSON blob that can be
        // understood by the HCS.
        for i, v := range c.Network.Interface.PortBindings {
            proto := strings.ToUpper(i.Proto())
            if proto != "TCP" && proto != "UDP" {
                return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid protocol %s", i.Proto())
            }
            if len(v) > 1 {
                return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support more than one host port in NAT settings")
            }
            for _, v2 := range v {
                var (
                    iPort, ePort int
                    err          error
                )
                if len(v2.HostIP) != 0 {
                    return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support host IP addresses in NAT settings")
                }
                if ePort, err = strconv.Atoi(v2.HostPort); err != nil {
                    return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid container port %s: %s", v2.HostPort, err)
                }
                if iPort, err = strconv.Atoi(i.Port()); err != nil {
                    return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid internal port %s: %s", i.Port(), err)
                }
                if iPort < 0 || iPort > 65535 || ePort < 0 || ePort > 65535 {
                    return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("specified NAT port is not in allowed range")
                }
                pbs = append(pbs, portBinding{
                    ExternalPort: ePort,
                    InternalPort: iPort,
                    Protocol:     proto,
                })
            }
        }
        // TODO Windows: TP3 workaround. Allow the user to override the name of
        // the Container NAT device through an environment variable. This will
        // ultimately be a global daemon parameter on Windows, similar to -b
        // for the name of the virtual switch (aka bridge).
        cn := os.Getenv("DOCKER_CONTAINER_NAT")
        if len(cn) == 0 {
            cn = defaultContainerNAT
        }
        dev := device{
            DeviceType: "Network",
            Connection: &networkConnection{
                NetworkName: c.Network.Interface.Bridge,
                // TODO Windows: Fixme, next line. Needs HCS fix.
                EnableNat: false,
// ... (the rest of this example is omitted) ...
Example 13: Run
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
    var (
        term                           execdriver.Terminal
        err                            error
        inListen, outListen, errListen *npipe.PipeListener
    )
    // Make sure the client isn't asking for options which aren't supported
    err = checkSupportedOptions(c)
    if err != nil {
        return execdriver.ExitStatus{ExitCode: -1}, err
    }
    cu := &containerInit{
        SystemType:              "Container",
        Name:                    c.ID,
        IsDummy:                 dummyMode,
        VolumePath:              c.Rootfs,
        IgnoreFlushesDuringBoot: c.FirstStart,
        LayerFolderPath:         c.LayerFolder,
    }
    for i := 0; i < len(c.LayerPaths); i++ {
        cu.Layers = append(cu.Layers, layer{
            Id:   hcsshim.NewGUID(c.LayerPaths[i]).ToString(),
            Path: c.LayerPaths[i],
        })
    }
    if c.Network.Interface != nil {
        dev := device{
            DeviceType: "Network",
            Connection: &networkConnection{
                NetworkName: c.Network.Interface.Bridge,
                EnableNat:   false,
            },
        }
        if c.Network.Interface.MacAddress != "" {
            windowsStyleMAC := strings.Replace(
                c.Network.Interface.MacAddress, ":", "-", -1)
            dev.Settings = networkSettings{
                MacAddress: windowsStyleMAC,
            }
        }
        logrus.Debugf("Virtual switch '%s', mac='%s'", c.Network.Interface.Bridge, c.Network.Interface.MacAddress)
        cu.Devices = append(cu.Devices, dev)
    } else {
        logrus.Debugln("No network interface")
    }
    configurationb, err := json.Marshal(cu)
    if err != nil {
        return execdriver.ExitStatus{ExitCode: -1}, err
    }
    configuration := string(configurationb)
    err = hcsshim.CreateComputeSystem(c.ID, configuration)
    if err != nil {
        logrus.Debugln("Failed to create temporary container ", err)
        return execdriver.ExitStatus{ExitCode: -1}, err
    }
    // Start the container
    logrus.Debugln("Starting container ", c.ID)
    err = hcsshim.StartComputeSystem(c.ID)
    if err != nil {
        logrus.Errorf("Failed to start compute system: %s", err)
        return execdriver.ExitStatus{ExitCode: -1}, err
    }
    defer func() {
        // Stop the container
        if terminateMode {
            logrus.Debugf("Terminating container %s", c.ID)
            if err := hcsshim.TerminateComputeSystem(c.ID); err != nil {
                // IMPORTANT: Don't fail if this fails to change state. The
                // container could already have been stopped through kill().
                // Otherwise, the docker daemon will hang in job wait().
                logrus.Warnf("Ignoring error from TerminateComputeSystem %s", err)
            }
        } else {
            logrus.Debugf("Shutting down container %s", c.ID)
            if err := hcsshim.ShutdownComputeSystem(c.ID); err != nil {
                // IMPORTANT: Don't fail if this fails to change state. The
                // container could already have been stopped through kill().
                // Otherwise, the docker daemon will hang in job wait().
                logrus.Warnf("Ignoring error from ShutdownComputeSystem %s", err)
            }
        }
    }()
    // We use a different pipe name between real and dummy mode in the HCS
    var serverPipeFormat, clientPipeFormat string
    if dummyMode {
        clientPipeFormat = `\\.\pipe\docker-run-%[1]s-%[2]s`
// ... (the rest of this example is omitted) ...
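The truncated tail sets up npipe.PipeListener values to carry the container's stdio over Windows named pipes, with different pipe-name formats for real and dummy mode. Below is a Windows-only sketch of that transport using the same gopkg.in/natefinch/npipe.v2 package the snippet imports; the pipe-name format echoes clientPipeFormat above, and the single-accept relay is an assumption about the omitted code:

package main

import (
    "fmt"
    "io"
    "os"

    "gopkg.in/natefinch/npipe.v2" // the npipe package used by the driver above
)

func main() {
    // Build a pipe name in the same style as clientPipeFormat.
    pipeName := fmt.Sprintf(`\\.\pipe\docker-run-%[1]s-%[2]s`, "someContainerID", "stdout")
    outListen, err := npipe.Listen(pipeName)
    if err != nil {
        panic(err)
    }
    defer outListen.Close()

    // Accept one connection from the in-container process and relay its
    // output to our stdout, as the driver relays to pipes.Stdout.
    conn, err := outListen.Accept()
    if err != nil {
        panic(err)
    }
    defer conn.Close()
    io.Copy(os.Stdout, conn)
}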