This page collects typical usage examples of the Golang function github.com/hashicorp/nomad/helper/discover.NomadExecutable. If you are wondering what exactly NomadExecutable does, how to call it, and where it is used in practice, the curated code examples below should help.
14 code examples of the NomadExecutable function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
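All 14 examples share the same bootstrap pattern: locate the currently running Nomad binary and re-invoke it as a plugin sub-process. Below is a minimal sketch of that pattern distilled from the examples that follow; the log file path in main is an arbitrary placeholder.

package main

import (
	"fmt"
	"os/exec"

	"github.com/hashicorp/go-plugin"
	"github.com/hashicorp/nomad/helper/discover"
)

// newExecutorPluginConfig locates the running Nomad binary and prepares a
// go-plugin client config that re-invokes it in "executor" mode, exactly as
// the driver examples below do.
func newExecutorPluginConfig(pluginLogFile string) (*plugin.ClientConfig, error) {
	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}
	return &plugin.ClientConfig{
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}, nil
}

func main() {
	cfg, err := newExecutorPluginConfig("/tmp/executor.out")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(cfg.Cmd.Path)
}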
Example 1: TestDockerDriver_Handle
func TestDockerDriver_Handle(t *testing.T) {
t.Parallel()
bin, err := discover.NomadExecutable()
if err != nil {
t.Fatalf("got an err: %v", err)
}
f, err := ioutil.TempFile(os.TempDir(), "")
if err != nil {
t.Fatalf("failed to create temp file: %v", err)
}
defer f.Close()
defer os.Remove(f.Name())
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "syslog", f.Name()),
}
exec, pluginClient, err := createExecutor(pluginConfig, os.Stdout, &config.Config{})
if err != nil {
t.Fatalf("got an err: %v", err)
}
defer pluginClient.Kill()
h := &DockerHandle{
version: "version",
imageID: "imageid",
executor: exec,
pluginClient: pluginClient,
containerID: "containerid",
killTimeout: 5 * time.Nanosecond,
maxKillTimeout: 15 * time.Nanosecond,
doneCh: make(chan bool),
waitCh: make(chan *cstructs.WaitResult, 1),
}
actual := h.ID()
expected := fmt.Sprintf("DOCKER:{\"Version\":\"version\",\"ImageID\":\"imageid\",\"ContainerID\":\"containerid\",\"KillTimeout\":5,\"MaxKillTimeout\":15,\"PluginConfig\":{\"Pid\":%d,\"AddrNet\":\"unix\",\"AddrName\":\"%s\"}}",
pluginClient.ReattachConfig().Pid, pluginClient.ReattachConfig().Addr.String())
if actual != expected {
t.Errorf("Expected `%s`, found `%s`", expected, actual)
}
}
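The expected string in this test shows that a Docker handle ID is simply a "DOCKER:" prefix followed by JSON. Here is a minimal sketch of decoding such an ID back into a struct; the dockerPID type is a hypothetical mirror of the fields visible in the test, not the driver's actual Open implementation.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// dockerPID mirrors the state encoded in the handle ID above; fields not
// declared here, such as PluginConfig, are silently ignored by json.Unmarshal.
type dockerPID struct {
	Version        string
	ImageID        string
	ContainerID    string
	KillTimeout    int64
	MaxKillTimeout int64
}

func parseDockerHandle(id string) (*dockerPID, error) {
	if !strings.HasPrefix(id, "DOCKER:") {
		return nil, fmt.Errorf("not a docker handle: %q", id)
	}
	pid := &dockerPID{}
	if err := json.Unmarshal([]byte(strings.TrimPrefix(id, "DOCKER:")), pid); err != nil {
		return nil, fmt.Errorf("failed to parse handle %q: %v", id, err)
	}
	return pid, nil
}

func main() {
	pid, err := parseDockerHandle(`DOCKER:{"Version":"version","ImageID":"imageid","ContainerID":"containerid","KillTimeout":5,"MaxKillTimeout":15}`)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%+v\n", pid)
}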
Example 2: Start
func (d *JavaDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
var driverConfig JavaDriverConfig
if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
return nil, err
}
// Set the host environment variables.
filter := strings.Split(d.config.ReadDefault("env.blacklist", config.DefaultEnvBlacklist), ",")
d.taskEnv.AppendHostEnvvars(filter)
taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
if driverConfig.JarPath == "" {
return nil, fmt.Errorf("jar_path must be specified")
}
args := []string{}
// Look for jvm options
if len(driverConfig.JvmOpts) != 0 {
d.logger.Printf("[DEBUG] driver.java: found JVM options: %s", driverConfig.JvmOpts)
args = append(args, driverConfig.JvmOpts...)
}
// Build the argument list.
args = append(args, "-jar", driverConfig.JarPath)
if len(driverConfig.Args) != 0 {
args = append(args, driverConfig.Args...)
}
bin, err := discover.NomadExecutable()
if err != nil {
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
}
execIntf, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
if err != nil {
return nil, err
}
// Set the context
executorCtx := &executor.ExecutorContext{
TaskEnv: d.taskEnv,
Driver: "java",
AllocDir: ctx.AllocDir,
AllocID: ctx.AllocID,
ChrootEnv: d.config.ChrootEnv,
Task: task,
}
if err := execIntf.SetContext(executorCtx); err != nil {
pluginClient.Kill()
return nil, fmt.Errorf("failed to set executor context: %v", err)
}
absPath, err := GetAbsolutePath("java")
if err != nil {
return nil, err
}
execCmd := &executor.ExecCommand{
Cmd: absPath,
Args: args,
FSIsolation: true,
ResourceLimits: true,
User: getExecutorUser(task),
}
ps, err := execIntf.LaunchCmd(execCmd)
if err != nil {
pluginClient.Kill()
return nil, err
}
d.logger.Printf("[DEBUG] driver.java: started process with pid: %v", ps.Pid)
// Return a driver handle
maxKill := d.DriverContext.config.MaxKillTimeout
h := &javaHandle{
pluginClient: pluginClient,
executor: execIntf,
userPid: ps.Pid,
isolationConfig: ps.IsolationConfig,
taskDir: taskDir,
allocDir: ctx.AllocDir,
killTimeout: GetKillTimeout(task.KillTimeout, maxKill),
maxKillTimeout: maxKill,
version: d.config.Version,
logger: d.logger,
doneCh: make(chan struct{}),
waitCh: make(chan *dstructs.WaitResult, 1),
}
if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil {
d.logger.Printf("[ERR] driver.java: error registering services with consul for task: %q: %v", task.Name, err)
}
go h.run()
//.........part of the code omitted here.........
Example 3: Start
//.........part of the code omitted here.........
if task.Resources.MemoryMB == 0 {
return nil, fmt.Errorf("Memory limit cannot be zero")
}
if task.Resources.CPU == 0 {
return nil, fmt.Errorf("CPU limit cannot be zero")
}
// Add memory isolator
cmdArgs = append(cmdArgs, fmt.Sprintf("--memory=%vM", int64(task.Resources.MemoryMB)*bytesToMB))
// Add CPU isolator
cmdArgs = append(cmdArgs, fmt.Sprintf("--cpu=%vm", int64(task.Resources.CPU)))
// Add DNS servers
for _, ip := range driverConfig.DNSServers {
if parsedIP := net.ParseIP(ip); parsedIP == nil {
msg := fmt.Errorf("invalid ip address for container dns server %q", ip)
d.logger.Printf("[DEBUG] driver.rkt: %v", msg)
return nil, msg
} else {
cmdArgs = append(cmdArgs, fmt.Sprintf("--dns=%s", ip))
}
}
// set DNS search domains
for _, domain := range driverConfig.DNSSearchDomains {
cmdArgs = append(cmdArgs, fmt.Sprintf("--dns-search=%s", domain))
}
// Add user passed arguments.
if len(driverConfig.Args) != 0 {
parsed := d.taskEnv.ParseAndReplace(driverConfig.Args)
// Need to start arguments with "--"
if len(parsed) > 0 {
cmdArgs = append(cmdArgs, "--")
}
for _, arg := range parsed {
cmdArgs = append(cmdArgs, fmt.Sprintf("%v", arg))
}
}
bin, err := discover.NomadExecutable()
if err != nil {
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
}
execIntf, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
if err != nil {
return nil, err
}
executorCtx := &executor.ExecutorContext{
TaskEnv: d.taskEnv,
Driver: "rkt",
AllocDir: ctx.AllocDir,
AllocID: ctx.AllocID,
Task: task,
}
absPath, err := GetAbsolutePath("rkt")
if err != nil {
return nil, err
}
ps, err := execIntf.LaunchCmd(&executor.ExecCommand{
Cmd: absPath,
Args: cmdArgs,
User: task.User,
}, executorCtx)
if err != nil {
pluginClient.Kill()
return nil, err
}
d.logger.Printf("[DEBUG] driver.rkt: started ACI %q with: %v", img, cmdArgs)
maxKill := d.DriverContext.config.MaxKillTimeout
h := &rktHandle{
pluginClient: pluginClient,
executor: execIntf,
executorPid: ps.Pid,
allocDir: ctx.AllocDir,
logger: d.logger,
killTimeout: GetKillTimeout(task.KillTimeout, maxKill),
maxKillTimeout: maxKill,
doneCh: make(chan struct{}),
waitCh: make(chan *cstructs.WaitResult, 1),
}
if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil {
h.logger.Printf("[ERR] driver.rkt: error registering services for task: %q: %v", task.Name, err)
}
go h.run()
return h, nil
}
Example 4: Start
func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
var driverConfig DockerDriverConfig
if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
return nil, err
}
if err := driverConfig.Init(); err != nil {
return nil, err
}
if err := driverConfig.Validate(); err != nil {
return nil, err
}
cleanupImage := d.config.ReadBoolDefault("docker.cleanup.image", true)
taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
// Initialize docker API clients
client, waitClient, err := d.dockerClients()
if err != nil {
return nil, fmt.Errorf("Failed to connect to docker daemon: %s", err)
}
if err := d.createImage(&driverConfig, client, taskDir); err != nil {
return nil, fmt.Errorf("failed to create image: %v", err)
}
image := driverConfig.ImageName
// Now that we have the image we can get the image id
dockerImage, err := client.InspectImage(image)
if err != nil {
d.logger.Printf("[ERR] driver.docker: failed getting image id for %s: %s", image, err)
return nil, fmt.Errorf("Failed to determine image id for `%s`: %s", image, err)
}
d.logger.Printf("[DEBUG] driver.docker: identified image %s as %s", image, dockerImage.ID)
bin, err := discover.NomadExecutable()
if err != nil {
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
}
exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
if err != nil {
return nil, err
}
executorCtx := &executor.ExecutorContext{
TaskEnv: d.taskEnv,
Task: task,
Driver: "docker",
AllocDir: ctx.AllocDir,
AllocID: ctx.AllocID,
PortLowerBound: d.config.ClientMinPort,
PortUpperBound: d.config.ClientMaxPort,
}
ss, err := exec.LaunchSyslogServer(executorCtx)
if err != nil {
return nil, fmt.Errorf("failed to start syslog collector: %v", err)
}
config, err := d.createContainer(ctx, task, &driverConfig, ss.Addr)
if err != nil {
d.logger.Printf("[ERR] driver.docker: failed to create container configuration for image %s: %s", image, err)
pluginClient.Kill()
return nil, fmt.Errorf("Failed to create container configuration for image %s: %s", image, err)
}
// Create a container
container, err := client.CreateContainer(config)
if err != nil {
// If the container already exists because of a previous failure we'll
// try to purge it and re-create it.
if strings.Contains(err.Error(), "container already exists") {
// Get the ID of the existing container so we can delete it
containers, err := client.ListContainers(docker.ListContainersOptions{
// The image might be in use by a stopped container, so check everything
All: true,
Filters: map[string][]string{
"name": []string{config.Name},
},
})
if err != nil {
d.logger.Printf("[ERR] driver.docker: failed to query list of containers matching name:%s", config.Name)
pluginClient.Kill()
return nil, fmt.Errorf("Failed to query list of containers: %s", err)
}
// Couldn't find any matching containers
if len(containers) == 0 {
d.logger.Printf("[ERR] driver.docker: failed to get id for container %s: %#v", config.Name, containers)
pluginClient.Kill()
return nil, fmt.Errorf("Failed to get id for container %s", config.Name)
}
//.........part of the code omitted here.........
Example 5: spawnDaemon
// spawnDaemon executes a double fork to start the user command with proper
// isolation. Stores the child process for use in Wait.
func (e *LinuxExecutor) spawnDaemon() error {
bin, err := discover.NomadExecutable()
if err != nil {
return fmt.Errorf("Failed to determine the nomad executable: %v", err)
}
// Serialize the cmd and the cgroup configuration so it can be passed to the
// sub-process.
var buffer bytes.Buffer
enc := json.NewEncoder(&buffer)
c := command.DaemonConfig{
Cmd: e.cmd.Cmd,
Chroot: e.taskDir,
StdoutFile: filepath.Join(e.taskDir, allocdir.TaskLocal, fmt.Sprintf("%v.stdout", e.taskName)),
StderrFile: filepath.Join(e.taskDir, allocdir.TaskLocal, fmt.Sprintf("%v.stderr", e.taskName)),
StdinFile: "/dev/null",
}
if err := enc.Encode(c); err != nil {
return fmt.Errorf("Failed to serialize daemon configuration: %v", err)
}
// Create a pipe to capture Stdout.
pr, pw, err := os.Pipe()
if err != nil {
return err
}
e.spawnOutputWriter = pw
e.spawnOutputReader = pr
// Call ourselves using a hidden flag. The new instance of nomad will join
// the passed cgroup, forkExec the cmd, and output status codes through
// Stdout.
escaped := strconv.Quote(buffer.String())
spawn := exec.Command(bin, "spawn-daemon", escaped)
spawn.Stdout = e.spawnOutputWriter
// Capture its Stdin.
spawnStdIn, err := spawn.StdinPipe()
if err != nil {
return err
}
if err := spawn.Start(); err != nil {
return fmt.Errorf("Failed to call spawn-daemon on nomad executable: %v", err)
}
// Join the spawn-daemon to the cgroup.
if e.groups != nil {
manager := cgroupFs.Manager{}
manager.Cgroups = e.groups
// Apply will place the current pid into the tasks file for each of the
// created cgroups:
// /sys/fs/cgroup/memory/user/1000.user/4.session/<uuid>/tasks
//
// Apply requires superuser permissions, and may fail if Nomad is not run with
// the required permissions
if err := manager.Apply(spawn.Process.Pid); err != nil {
errs := new(multierror.Error)
errs = multierror.Append(errs, fmt.Errorf("Failed to join spawn-daemon to the cgroup (config => %+v): %v", manager.Cgroups, err))
if err := sendAbortCommand(spawnStdIn); err != nil {
errs = multierror.Append(errs, err)
}
return errs
}
}
// Tell it to start.
if err := sendStartCommand(spawnStdIn); err != nil {
return err
}
// Parse the response.
dec := json.NewDecoder(e.spawnOutputReader)
var resp command.SpawnStartStatus
if err := dec.Decode(&resp); err != nil {
return fmt.Errorf("Failed to parse spawn-daemon start response: %v", err)
}
if resp.ErrorMsg != "" {
return fmt.Errorf("Failed to execute user command: %s", resp.ErrorMsg)
}
e.spawnChild = *spawn
return nil
}
Example 6: Start
//.........part of the code omitted here.........
for _, port := range network.DynamicPorts {
// By default we will map the allocated port 1:1 to the container
var containerPort string
if mapped, ok := driverConfig.PortMap[port.Label]; ok {
containerPort = mapped
} else {
// If the user hasn't mapped the port via port_map, refuse to run the container.
return nil, fmt.Errorf("port_map is not set. When you define a port in the resources, you need to configure port_map.")
}
hostPortStr := strconv.Itoa(port.Value)
d.logger.Printf("[DEBUG] driver.rkt: exposed port %s", containerPort)
// Add port option to rkt run arguments. rkt allows multiple port args
cmdArgs = append(cmdArgs, fmt.Sprintf("--port=%s:%s", containerPort, hostPortStr))
}
}
// Add user passed arguments.
if len(driverConfig.Args) != 0 {
parsed := d.taskEnv.ParseAndReplace(driverConfig.Args)
// Need to start arguments with "--"
if len(parsed) > 0 {
cmdArgs = append(cmdArgs, "--")
}
for _, arg := range parsed {
cmdArgs = append(cmdArgs, fmt.Sprintf("%v", arg))
}
}
// Set the host environment variables.
filter := strings.Split(d.config.ReadDefault("env.blacklist", config.DefaultEnvBlacklist), ",")
d.taskEnv.AppendHostEnvvars(filter)
bin, err := discover.NomadExecutable()
if err != nil {
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
}
execIntf, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
if err != nil {
return nil, err
}
executorCtx := &executor.ExecutorContext{
TaskEnv: d.taskEnv,
Driver: "rkt",
AllocDir: ctx.AllocDir,
AllocID: ctx.AllocID,
Task: task,
}
if err := execIntf.SetContext(executorCtx); err != nil {
pluginClient.Kill()
return nil, fmt.Errorf("failed to set executor context: %v", err)
}
absPath, err := GetAbsolutePath(rktCmd)
if err != nil {
return nil, err
}
execCmd := &executor.ExecCommand{
Cmd: absPath,
Args: cmdArgs,
User: task.User,
}
ps, err := execIntf.LaunchCmd(execCmd)
if err != nil {
pluginClient.Kill()
return nil, err
}
d.logger.Printf("[DEBUG] driver.rkt: started ACI %q with: %v", img, cmdArgs)
maxKill := d.DriverContext.config.MaxKillTimeout
h := &rktHandle{
pluginClient: pluginClient,
executor: execIntf,
executorPid: ps.Pid,
allocDir: ctx.AllocDir,
logger: d.logger,
killTimeout: GetKillTimeout(task.KillTimeout, maxKill),
maxKillTimeout: maxKill,
doneCh: make(chan struct{}),
waitCh: make(chan *dstructs.WaitResult, 1),
}
if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil {
h.logger.Printf("[ERR] driver.rkt: error registering services for task: %q: %v", task.Name, err)
}
go h.run()
return h, nil
}
Example 7: Start
func (d *RawExecDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
var driverConfig ExecDriverConfig
if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
return nil, err
}
// Get the task's local directory.
taskName := d.DriverContext.taskName
taskDir, ok := ctx.AllocDir.TaskDirs[taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
// Get the command to be run
command := driverConfig.Command
if err := validateCommand(command, "args"); err != nil {
return nil, err
}
// Check if an artifact is specified and attempt to download it
source, ok := task.Config["artifact_source"]
if ok && source != "" {
// Proceed to download an artifact to be executed.
_, err := getter.GetArtifact(
taskDir,
driverConfig.ArtifactSource,
driverConfig.Checksum,
d.logger,
)
if err != nil {
return nil, err
}
}
bin, err := discover.NomadExecutable()
if err != nil {
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
}
exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
if err != nil {
return nil, err
}
executorCtx := &executor.ExecutorContext{
TaskEnv: d.taskEnv,
AllocDir: ctx.AllocDir,
TaskName: task.Name,
TaskResources: task.Resources,
LogConfig: task.LogConfig,
}
ps, err := exec.LaunchCmd(&executor.ExecCommand{Cmd: command, Args: driverConfig.Args}, executorCtx)
if err != nil {
pluginClient.Kill()
return nil, fmt.Errorf("error starting process via the plugin: %v", err)
}
d.logger.Printf("[DEBUG] driver.raw_exec: started process with pid: %v", ps.Pid)
// Return a driver handle
h := &rawExecHandle{
pluginClient: pluginClient,
executor: exec,
userPid: ps.Pid,
killTimeout: d.DriverContext.KillTimeout(task),
allocDir: ctx.AllocDir,
version: d.config.Version,
logger: d.logger,
doneCh: make(chan struct{}),
waitCh: make(chan *cstructs.WaitResult, 1),
}
go h.run()
return h, nil
}
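Both this example and Example 11 call validateCommand before launching anything. A plausible sketch of what such a validator checks is shown below; this is an assumption for illustration, not necessarily the exact helper these drivers ship with.

package driverutil

import (
	"fmt"
	"strings"
)

// validateCommand rejects empty commands and surrounding whitespace, nudging
// the user toward the separate args field for passing arguments.
func validateCommand(command, argField string) error {
	trimmed := strings.TrimSpace(command)
	if len(trimmed) == 0 {
		return fmt.Errorf("command empty: %q", command)
	}
	if len(trimmed) != len(command) {
		return fmt.Errorf("command %q contains extra whitespace; pass arguments via the %q field", command, argField)
	}
	return nil
}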
Example 8: Start
// Run an existing Qemu image. Start() will pull down an existing, valid Qemu
// image and save it to the Driver's Allocation Dir
func (d *QemuDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
var driverConfig QemuDriverConfig
if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
return nil, err
}
if len(driverConfig.PortMap) > 1 {
return nil, fmt.Errorf("Only one port_map block is allowed in the qemu driver config")
}
// Get the image source
vmPath := driverConfig.ImagePath
if vmPath == "" {
return nil, fmt.Errorf("image_path must be set")
}
vmID := filepath.Base(vmPath)
// Get the task's local directory.
taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
// Parse configuration arguments
// Create the base arguments
accelerator := "tcg"
if driverConfig.Accelerator != "" {
accelerator = driverConfig.Accelerator
}
// TODO: Check a lower bound, e.g. Qemu's default of 128
mem := fmt.Sprintf("%dM", task.Resources.MemoryMB)
absPath, err := GetAbsolutePath("qemu-system-x86_64")
if err != nil {
return nil, err
}
args := []string{
absPath,
"-machine", "type=pc,accel=" + accelerator,
"-name", vmID,
"-m", mem,
"-drive", "file=" + vmPath,
"-nographic",
}
// Add pass-through arguments to the qemu executable. A user can specify
// these arguments in driver task configuration. These arguments are
// passed directly to the qemu driver as command line options.
// For example, args = [ "-nodefconfig", "-nodefaults" ]
// This will allow a VM with embedded configuration to boot successfully.
args = append(args, driverConfig.Args...)
// Check the required Networks in Resources to add port mappings. If no
// resources are required, we assume the VM is a purely compute job and does
// not need to be reachable from the outside world. VMs run without port
// mappings can still reach out to the world, but they are effectively
// firewalled from inbound traffic.
protocols := []string{"udp", "tcp"}
if len(task.Resources.Networks) > 0 && len(driverConfig.PortMap) == 1 {
// Loop through the port map and construct the hostfwd string, to map
// reserved ports to the ports listening in the VM
// Ex: hostfwd=tcp::22000-:22,hostfwd=tcp::80-:8080
var forwarding []string
taskPorts := task.Resources.Networks[0].MapLabelToValues(nil)
for label, guest := range driverConfig.PortMap[0] {
host, ok := taskPorts[label]
if !ok {
return nil, fmt.Errorf("Unknown port label %q", label)
}
for _, p := range protocols {
forwarding = append(forwarding, fmt.Sprintf("hostfwd=%s::%d-:%d", p, host, guest))
}
}
if len(forwarding) != 0 {
args = append(args,
"-netdev",
fmt.Sprintf("user,id=user.0,%s", strings.Join(forwarding, ",")),
"-device", "virtio-net,netdev=user.0",
)
}
}
// If using KVM, add optimization args
if accelerator == "kvm" {
args = append(args,
"-enable-kvm",
"-cpu", "host",
// Do we have cores information available to the Driver?
// "-smp", fmt.Sprintf("%d", cores),
)
}
d.logger.Printf("[DEBUG] Starting QemuVM command: %q", strings.Join(args, " "))
bin, err := discover.NomadExecutable()
if err != nil {
//.........part of the code omitted here.........
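The hostfwd construction used by the qemu examples (8 and 13) is easier to see in isolation: one clause per protocol per mapped port, joined into a single -netdev option. The sketch below is self-contained, with made-up port labels and values.

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Hypothetical guest ports keyed by port label (the driver's port_map).
	portMap := map[string]int{"ssh": 22, "web": 8080}
	// Hypothetical host ports allocated by Nomad, keyed by the same labels.
	taskPorts := map[string]int{"ssh": 22000, "web": 80}

	labels := make([]string, 0, len(portMap))
	for label := range portMap {
		labels = append(labels, label)
	}
	sort.Strings(labels) // deterministic output for this example

	var forwarding []string
	for _, label := range labels {
		host, guest := taskPorts[label], portMap[label]
		for _, p := range []string{"udp", "tcp"} {
			forwarding = append(forwarding, fmt.Sprintf("hostfwd=%s::%d-:%d", p, host, guest))
		}
	}
	// Prints: user,id=user.0,hostfwd=udp::22000-:22,hostfwd=tcp::22000-:22,hostfwd=udp::80-:8080,hostfwd=tcp::80-:8080
	fmt.Println("user,id=user.0," + strings.Join(forwarding, ","))
}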
Example 9: Start
func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
var driverConfig DockerDriverConfig
if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
return nil, err
}
image := driverConfig.ImageName
if err := driverConfig.Validate(); err != nil {
return nil, err
}
if task.Resources == nil {
return nil, fmt.Errorf("Resources are not specified")
}
if task.Resources.MemoryMB == 0 {
return nil, fmt.Errorf("Memory limit cannot be zero")
}
if task.Resources.CPU == 0 {
return nil, fmt.Errorf("CPU limit cannot be zero")
}
cleanupContainer := d.config.ReadBoolDefault("docker.cleanup.container", true)
cleanupImage := d.config.ReadBoolDefault("docker.cleanup.image", true)
// Initialize docker API client
client, err := d.dockerClient()
if err != nil {
return nil, fmt.Errorf("Failed to connect to docker daemon: %s", err)
}
repo, tag := docker.ParseRepositoryTag(image)
// Make sure tag is always explicitly set. We'll default to "latest" if it
// isn't, which is the expected behavior.
if tag == "" {
tag = "latest"
}
var dockerImage *docker.Image
// We're going to check whether the image is already downloaded. If the tag
// is "latest" we have to check for a new version every time so we don't
// bother to check and cache the id here. We'll download first, then cache.
if tag != "latest" {
dockerImage, err = client.InspectImage(image)
}
// Download the image
if dockerImage == nil {
pullOptions := docker.PullImageOptions{
Repository: repo,
Tag: tag,
}
authOptions := docker.AuthConfiguration{}
if len(driverConfig.Auth) != 0 {
authOptions = docker.AuthConfiguration{
Username: driverConfig.Auth[0].Username,
Password: driverConfig.Auth[0].Password,
Email: driverConfig.Auth[0].Email,
ServerAddress: driverConfig.Auth[0].ServerAddress,
}
}
if authConfig := d.config.Read("docker.auth.config"); authConfig != "" {
if f, err := os.Open(authConfig); err == nil {
defer f.Close()
if authConfigurations, err := docker.NewAuthConfigurations(f); err == nil {
if authConfiguration, ok := authConfigurations.Configs[repo]; ok {
authOptions = authConfiguration
}
}
}
}
err = client.PullImage(pullOptions, authOptions)
if err != nil {
d.logger.Printf("[ERR] driver.docker: failed pulling container %s:%s: %s", repo, tag, err)
return nil, fmt.Errorf("Failed to pull `%s`: %s", image, err)
}
d.logger.Printf("[DEBUG] driver.docker: docker pull %s:%s succeeded", repo, tag)
// Now that we have the image we can get the image id
dockerImage, err = client.InspectImage(image)
if err != nil {
d.logger.Printf("[ERR] driver.docker: failed getting image id for %s: %s", image, err)
return nil, fmt.Errorf("Failed to determine image id for `%s`: %s", image, err)
}
}
taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
d.logger.Printf("[DEBUG] driver.docker: identified image %s as %s", image, dockerImage.ID)
bin, err := discover.NomadExecutable()
if err != nil {
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-syslog-collector.out", task.Name))
pluginConfig := &plugin.ClientConfig{
//.........part of the code omitted here.........
Example 10: Spawn
// Spawn does a double-fork to start and isolate the user command. It takes a
// callback that is invoked with the pid of the intermediary process. If the
// callback returns an error, the user command is not started and the spawn is
// cancelled. This can be used to put the process into a cgroup or jail and to
// cancel starting the user process if that was not successful. An error is
// returned if the callback returns an error or the user command couldn't be
// started.
func (s *Spawner) Spawn(cb func(pid int) error) error {
bin, err := discover.NomadExecutable()
if err != nil {
return fmt.Errorf("Failed to determine the nomad executable: %v", err)
}
exitFile, err := os.OpenFile(s.StateFile, os.O_CREATE|os.O_WRONLY, 0666)
if err != nil {
return fmt.Errorf("Error opening file to store exit status: %v", err)
}
defer exitFile.Close()
config, err := s.spawnConfig()
if err != nil {
return err
}
spawn := exec.Command(bin, "spawn-daemon", config)
// Capture stdout
spawnStdout, err := spawn.StdoutPipe()
if err != nil {
return fmt.Errorf("Failed to capture spawn-daemon stdout: %v", err)
}
defer spawnStdout.Close()
// Capture stdin.
spawnStdin, err := spawn.StdinPipe()
if err != nil {
return fmt.Errorf("Failed to capture spawn-daemon stdin: %v", err)
}
defer spawnStdin.Close()
if err := spawn.Start(); err != nil {
return fmt.Errorf("Failed to call spawn-daemon on nomad executable: %v", err)
}
if cb != nil {
cbErr := cb(spawn.Process.Pid)
if cbErr != nil {
errs := new(multierror.Error)
errs = multierror.Append(errs, cbErr)
if err := s.sendAbortCommand(spawnStdin); err != nil {
errs = multierror.Append(errs, err)
}
return errs
}
}
if err := s.sendStartCommand(spawnStdin); err != nil {
return err
}
respCh := make(chan command.SpawnStartStatus, 1)
errCh := make(chan error, 1)
go func() {
var resp command.SpawnStartStatus
dec := json.NewDecoder(spawnStdout)
if err := dec.Decode(&resp); err != nil {
errCh <- fmt.Errorf("Failed to parse spawn-daemon start response: %v", err)
return
}
respCh <- resp
}()
select {
case err := <-errCh:
return err
case resp := <-respCh:
if resp.ErrorMsg != "" {
return fmt.Errorf("Failed to execute user command: %s", resp.ErrorMsg)
}
s.UserPid = resp.UserPID
case <-time.After(5 * time.Second):
return fmt.Errorf("timed out waiting for response")
}
// Store the spawn process.
s.spawn = spawn.Process
s.SpawnPid = s.spawn.Pid
s.SpawnPpid = os.Getpid()
return nil
}
Example 11: Start
func (d *ExecDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
var driverConfig ExecDriverConfig
if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
return nil, err
}
// Get the command to be run
command := driverConfig.Command
if err := validateCommand(command, "args"); err != nil {
return nil, err
}
// Set the host environment variables.
filter := strings.Split(d.config.ReadDefault("env.blacklist", config.DefaultEnvBlacklist), ",")
d.taskEnv.AppendHostEnvvars(filter)
// Get the task directory for storing the executor logs.
taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
bin, err := discover.NomadExecutable()
if err != nil {
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
}
exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
if err != nil {
return nil, err
}
executorCtx := &executor.ExecutorContext{
TaskEnv: d.taskEnv,
Driver: "exec",
AllocDir: ctx.AllocDir,
AllocID: ctx.AllocID,
Task: task,
}
ps, err := exec.LaunchCmd(&executor.ExecCommand{
Cmd: command,
Args: driverConfig.Args,
FSIsolation: true,
ResourceLimits: true,
User: getExecutorUser(task),
}, executorCtx)
if err != nil {
pluginClient.Kill()
return nil, err
}
d.logger.Printf("[DEBUG] driver.exec: started process via plugin with pid: %v", ps.Pid)
// Return a driver handle
maxKill := d.DriverContext.config.MaxKillTimeout
h := &execHandle{
pluginClient: pluginClient,
userPid: ps.Pid,
executor: exec,
allocDir: ctx.AllocDir,
isolationConfig: ps.IsolationConfig,
killTimeout: GetKillTimeout(task.KillTimeout, maxKill),
maxKillTimeout: maxKill,
logger: d.logger,
version: d.config.Version,
doneCh: make(chan struct{}),
waitCh: make(chan *cstructs.WaitResult, 1),
}
if err := exec.SyncServices(consulContext(d.config, "")); err != nil {
d.logger.Printf("[ERR] driver.exec: error registering services with consul for task: %q: %v", task.Name, err)
}
go h.run()
return h, nil
}
Example 12: Start
func (d *JavaDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
var driverConfig JavaDriverConfig
if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
return nil, err
}
taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
// Proceed to download an artifact to be executed.
path, err := getter.GetArtifact(
taskDir,
driverConfig.ArtifactSource,
driverConfig.Checksum,
d.logger,
)
if err != nil {
return nil, err
}
jarName := filepath.Base(path)
args := []string{}
// Look for jvm options
if len(driverConfig.JvmOpts) != 0 {
d.logger.Printf("[DEBUG] driver.java: found JVM options: %s", driverConfig.JvmOpts)
args = append(args, driverConfig.JvmOpts...)
}
// Build the argument list.
args = append(args, "-jar", jarName)
if len(driverConfig.Args) != 0 {
args = append(args, driverConfig.Args...)
}
bin, err := discover.NomadExecutable()
if err != nil {
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
}
exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
if err != nil {
return nil, err
}
executorCtx := &executor.ExecutorContext{
TaskEnv: d.taskEnv,
AllocDir: ctx.AllocDir,
TaskName: task.Name,
TaskResources: task.Resources,
FSIsolation: true,
ResourceLimits: true,
UnprivilegedUser: true,
}
ps, err := exec.LaunchCmd(&executor.ExecCommand{Cmd: "java", Args: args}, executorCtx)
if err != nil {
pluginClient.Kill()
return nil, fmt.Errorf("error starting process via the plugin: %v", err)
}
d.logger.Printf("[DEBUG] driver.java: started process with pid: %v", ps.Pid)
// Return a driver handle
h := &javaHandle{
pluginClient: pluginClient,
executor: exec,
userPid: ps.Pid,
isolationConfig: ps.IsolationConfig,
taskDir: taskDir,
allocDir: ctx.AllocDir,
killTimeout: d.DriverContext.KillTimeout(task),
logger: d.logger,
doneCh: make(chan struct{}),
waitCh: make(chan *cstructs.WaitResult, 1),
}
go h.run()
return h, nil
}
Example 13: Start
//.........part of the code omitted here.........
mem := fmt.Sprintf("%dM", task.Resources.MemoryMB)
args := []string{
"qemu-system-x86_64",
"-machine", "type=pc,accel=" + accelerator,
"-name", vmID,
"-m", mem,
"-drive", "file=" + vmPath,
"-nodefconfig",
"-nodefaults",
"-nographic",
}
// Check the required Networks in Resources to add port mappings. If no
// resources are required, we assume the VM is a purely compute job and does
// not need to be reachable from the outside world. VMs run without port
// mappings can still reach out to the world, but they are effectively
// firewalled from inbound traffic.
protocols := []string{"udp", "tcp"}
if len(task.Resources.Networks) > 0 && len(driverConfig.PortMap) == 1 {
// Loop through the port map and construct the hostfwd string, to map
// reserved ports to the ports listening in the VM
// Ex: hostfwd=tcp::22000-:22,hostfwd=tcp::80-:8080
var forwarding []string
taskPorts := task.Resources.Networks[0].MapLabelToValues(nil)
for label, guest := range driverConfig.PortMap[0] {
host, ok := taskPorts[label]
if !ok {
return nil, fmt.Errorf("Unknown port label %q", label)
}
for _, p := range protocols {
forwarding = append(forwarding, fmt.Sprintf("hostfwd=%s::%d-:%d", p, host, guest))
}
}
if len(forwarding) != 0 {
args = append(args,
"-netdev",
fmt.Sprintf("user,id=user.0,%s", strings.Join(forwarding, ",")),
"-device", "virtio-net,netdev=user.0",
)
}
}
// If using KVM, add optimization args
if accelerator == "kvm" {
args = append(args,
"-enable-kvm",
"-cpu", "host",
// Do we have cores information available to the Driver?
// "-smp", fmt.Sprintf("%d", cores),
)
}
d.logger.Printf("[DEBUG] Starting QemuVM command: %q", strings.Join(args, " "))
bin, err := discover.NomadExecutable()
if err != nil {
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
}
exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
if err != nil {
return nil, err
}
executorCtx := &executor.ExecutorContext{
TaskEnv: d.taskEnv,
AllocDir: ctx.AllocDir,
TaskName: task.Name,
TaskResources: task.Resources,
LogConfig: task.LogConfig,
}
ps, err := exec.LaunchCmd(&executor.ExecCommand{Cmd: args[0], Args: args[1:]}, executorCtx)
if err != nil {
pluginClient.Kill()
return nil, fmt.Errorf("error starting process via the plugin: %v", err)
}
d.logger.Printf("[INFO] Started new QemuVM: %s", vmID)
// Create and Return Handle
h := &qemuHandle{
pluginClient: pluginClient,
executor: exec,
userPid: ps.Pid,
allocDir: ctx.AllocDir,
killTimeout: d.DriverContext.KillTimeout(task),
version: d.config.Version,
logger: d.logger,
doneCh: make(chan struct{}),
waitCh: make(chan *cstructs.WaitResult, 1),
}
go h.run()
return h, nil
}
Example 14: Start
func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
// Set environment variables.
d.taskEnv.SetAllocDir(allocdir.SharedAllocContainerPath).
SetTaskLocalDir(allocdir.TaskLocalContainerPath).SetSecretsDir(allocdir.TaskSecretsContainerPath).Build()
driverConfig, err := NewDockerDriverConfig(task, d.taskEnv)
if err != nil {
return nil, err
}
cleanupImage := d.config.ReadBoolDefault("docker.cleanup.image", true)
taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
// Initialize docker API clients
client, waitClient, err := d.dockerClients()
if err != nil {
return nil, fmt.Errorf("Failed to connect to docker daemon: %s", err)
}
if err := d.createImage(driverConfig, client, taskDir); err != nil {
return nil, err
}
image := driverConfig.ImageName
// Now that we have the image we can get the image id
dockerImage, err := client.InspectImage(image)
if err != nil {
d.logger.Printf("[ERR] driver.docker: failed getting image id for %s: %s", image, err)
return nil, fmt.Errorf("Failed to determine image id for `%s`: %s", image, err)
}
d.logger.Printf("[DEBUG] driver.docker: identified image %s as %s", image, dockerImage.ID)
bin, err := discover.NomadExecutable()
if err != nil {
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
}
exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
if err != nil {
return nil, err
}
executorCtx := &executor.ExecutorContext{
TaskEnv: d.taskEnv,
Task: task,
Driver: "docker",
AllocDir: ctx.AllocDir,
AllocID: ctx.AllocID,
PortLowerBound: d.config.ClientMinPort,
PortUpperBound: d.config.ClientMaxPort,
}
if err := exec.SetContext(executorCtx); err != nil {
pluginClient.Kill()
return nil, fmt.Errorf("failed to set executor context: %v", err)
}
// Only launch syslog server if we're going to use it!
syslogAddr := ""
if runtime.GOOS == "darwin" && len(driverConfig.Logging) == 0 {
d.logger.Printf("[DEBUG] driver.docker: disabling syslog driver as Docker for Mac workaround")
} else if len(driverConfig.Logging) == 0 || driverConfig.Logging[0].Type == "syslog" {
ss, err := exec.LaunchSyslogServer()
if err != nil {
pluginClient.Kill()
return nil, fmt.Errorf("failed to start syslog collector: %v", err)
}
syslogAddr = ss.Addr
}
config, err := d.createContainerConfig(ctx, task, driverConfig, syslogAddr)
if err != nil {
d.logger.Printf("[ERR] driver.docker: failed to create container configuration for image %s: %s", image, err)
pluginClient.Kill()
return nil, fmt.Errorf("Failed to create container configuration for image %s: %s", image, err)
}
container, rerr := d.createContainer(config)
if rerr != nil {
d.logger.Printf("[ERR] driver.docker: failed to create container: %s", rerr)
pluginClient.Kill()
rerr.Err = fmt.Sprintf("Failed to create container: %s", rerr.Err)
return nil, rerr
}
d.logger.Printf("[INFO] driver.docker: created container %s", container.ID)
// We don't need to start the container if it is already running, since we
// don't re-create containers that are already present and running on the
// host.
if !container.State.Running {
// Start the container
err := d.startContainer(container)
if err != nil {
//.........这里部分代码省略.........