This article collects typical usage examples of the Golang Job.Error method from the github.com/docker/docker/engine package. If you are wondering what exactly Job.Error does, how to call it, and where it is used, the curated code examples below should help. You can also explore further usage examples of the type this method belongs to, github.com/docker/docker/engine.Job.
The following section shows 15 code examples of the Job.Error method, ordered by popularity by default.
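Before the excerpts, here is a minimal sketch of the pattern they all share: a handler receives a *engine.Job, validates its arguments with Job.Errorf, and converts any ordinary Go error into the job's failure status with Job.Error. Everything in this sketch apart from the engine package itself (the handler name dummyJob, the helper doWork, and the wiring in main) is an illustrative assumption rather than Docker code, and it presumes the classic engine API of that era (engine.New, Engine.Register, Job.Run).

package main

import (
    "fmt"

    "github.com/docker/docker/engine"
)

// dummyJob is a hypothetical handler showing the typical Job.Error pattern:
// validate arguments with Job.Errorf, do the work, and report any error
// through Job.Error so the engine returns a failure status to the caller.
func dummyJob(job *engine.Job) engine.Status {
    if len(job.Args) != 1 {
        return job.Errorf("usage: %s NAME", job.Name)
    }
    if err := doWork(job.Args[0]); err != nil {
        // Job.Error writes err to the job's error stream and yields a non-OK status.
        return job.Error(err)
    }
    return engine.StatusOK
}

// doWork stands in for whatever the job actually does (placeholder).
func doWork(name string) error {
    if name == "" {
        return fmt.Errorf("empty name")
    }
    return nil
}

func main() {
    // Assumed wiring: register the handler and run it once as a job.
    eng := engine.New()
    eng.Register("dummy", dummyJob)
    if err := eng.Job("dummy", "example").Run(); err != nil {
        fmt.Println("job failed:", err)
    }
}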
Example 1: Allocate
// Allocate a network interface
func Allocate(job *engine.Job) engine.Status {
    var (
        ip          *net.IP
        err         error
        id          = job.Args[0]
        requestedIP = net.ParseIP(job.Getenv("RequestedIP"))
    )
    if requestedIP != nil {
        ip, err = ipallocator.RequestIP(bridgeNetwork, &requestedIP)
    } else {
        ip, err = ipallocator.RequestIP(bridgeNetwork, nil)
    }
    if err != nil {
        return job.Error(err)
    }
    out := engine.Env{}
    out.Set("IP", ip.String())
    out.Set("Mask", bridgeNetwork.Mask.String())
    out.Set("Gateway", bridgeNetwork.IP.String())
    out.Set("Bridge", bridgeIface)
    size, _ := bridgeNetwork.Mask.Size()
    out.SetInt("IPPrefixLen", size)
    currentInterfaces.Set(id, &networkInterface{
        IP: *ip,
    })
    out.WriteTo(job.Stdout)
    return engine.StatusOK
}
Example 2: ContainerExport
func (daemon *Daemon) ContainerExport(job *engine.Job) engine.Status {
    if len(job.Args) != 1 {
        return job.Errorf("Usage: %s container_id", job.Name)
    }
    name := job.Args[0]
    container, err := daemon.Get(name)
    if err != nil {
        return job.Error(err)
    }
    data, err := container.Export()
    if err != nil {
        return job.Errorf("%s: %s", name, err)
    }
    defer data.Close()
    // Stream the entire contents of the container (basically a volatile snapshot)
    if _, err := io.Copy(job.Stdout, data); err != nil {
        return job.Errorf("%s: %s", name, err)
    }
    // FIXME: factor job-specific LogEvent to engine.Job.Run()
    container.LogEvent("export")
    return engine.StatusOK
}
Example 3: ContainerCopy
func (srv *Server) ContainerCopy(job *engine.Job) engine.Status {
    if len(job.Args) != 2 {
        return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
    }
    var (
        name     = job.Args[0]
        resource = job.Args[1]
    )
    if container := srv.daemon.Get(name); container != nil {
        data, err := container.Copy(resource)
        if err != nil {
            return job.Error(err)
        }
        defer data.Close()
        if _, err := io.Copy(job.Stdout, data); err != nil {
            return job.Error(err)
        }
        return engine.StatusOK
    }
    return job.Errorf("No such container: %s", name)
}
Example 4: ServeApi
// ServeApi loops through all of the protocols sent in to docker and spawns
// off a goroutine to set up a serving http.Server for each.
func ServeApi(job *engine.Job) engine.Status {
    if len(job.Args) == 0 {
        return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
    }
    var (
        protoAddrs = job.Args
        chErrors   = make(chan error, len(protoAddrs))
    )
    activationLock = make(chan struct{})
    for _, protoAddr := range protoAddrs {
        protoAddrParts := strings.SplitN(protoAddr, "://", 2)
        if len(protoAddrParts) != 2 {
            return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
        }
        go func() {
            log.Printf("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1])
            chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job)
        }()
    }
    for i := 0; i < len(protoAddrs); i += 1 {
        err := <-chErrors
        if err != nil {
            return job.Error(err)
        }
    }
    return engine.StatusOK
}
Example 5: CmdLookup
// CmdLookup returns an image encoded in JSON
func (s *TagStore) CmdLookup(job *engine.Job) engine.Status {
    if len(job.Args) != 1 {
        return job.Errorf("usage: %s NAME", job.Name)
    }
    name := job.Args[0]
    if image, err := s.LookupImage(name); err == nil && image != nil {
        if job.GetenvBool("raw") {
            b, err := image.RawJson()
            if err != nil {
                return job.Error(err)
            }
            job.Stdout.Write(b)
            return engine.StatusOK
        }
        out := &engine.Env{}
        out.Set("Id", image.ID)
        out.Set("Parent", image.Parent)
        out.Set("Comment", image.Comment)
        out.SetAuto("Created", image.Created)
        out.Set("Container", image.Container)
        out.SetJson("ContainerConfig", image.ContainerConfig)
        out.Set("DockerVersion", image.DockerVersion)
        out.Set("Author", image.Author)
        out.SetJson("Config", image.Config)
        out.Set("Architecture", image.Architecture)
        out.Set("Os", image.OS)
        out.SetInt64("Size", image.Size)
        if _, err = out.WriteTo(job.Stdout); err != nil {
            return job.Error(err)
        }
        return engine.StatusOK
    }
    return job.Errorf("No such image: %s", name)
}
Example 6: ContainerStart
func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
    if len(job.Args) < 1 {
        return job.Errorf("Usage: %s container_id", job.Name)
    }
    var (
        name      = job.Args[0]
        container = daemon.Get(name)
    )
    if container == nil {
        return job.Errorf("No such container: %s", name)
    }
    if container.IsRunning() {
        return job.Errorf("Container already started")
    }
    // If no environment was set, then no hostconfig was passed.
    // This is kept for backward compatibility - hostconfig should be passed when
    // creating a container, not during start.
    if len(job.Environ()) > 0 {
        hostConfig := runconfig.ContainerHostConfigFromJob(job)
        if err := daemon.setHostConfig(container, hostConfig); err != nil {
            return job.Error(err)
        }
    }
    if err := container.Start(); err != nil {
        container.LogEvent("die")
        return job.Errorf("Cannot start container %s: %s", name, err)
    }
    return engine.StatusOK
}
Example 7: ContainerRename
func (daemon *Daemon) ContainerRename(job *engine.Job) engine.Status {
    if len(job.Args) != 2 {
        return job.Errorf("usage: %s OLD_NAME NEW_NAME", job.Name)
    }
    oldName := job.Args[0]
    newName := job.Args[1]
    container, err := daemon.Get(oldName)
    if err != nil {
        return job.Error(err)
    }
    oldName = container.Name
    container.Lock()
    defer container.Unlock()
    if newName, err = daemon.reserveName(container.ID, newName); err != nil {
        return job.Errorf("Error when allocating new name: %s", err)
    }
    container.Name = newName
    if err := daemon.containerGraph.Delete(oldName); err != nil {
        return job.Errorf("Failed to delete container %q: %v", oldName, err)
    }
    return engine.StatusOK
}
Example 8: ContainerStart
func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
    if len(job.Args) < 1 {
        return job.Errorf("Usage: %s container_id", job.Name)
    }
    var (
        name      = job.Args[0]
        container = daemon.Get(name)
    )
    if container == nil {
        return job.Errorf("No such container: %s", name)
    }
    if container.IsRunning() {
        return job.Errorf("Container already started")
    }
    // If no environment was set, then no hostconfig was passed.
    if len(job.Environ()) > 0 {
        hostConfig := runconfig.ContainerHostConfigFromJob(job)
        if err := daemon.setHostConfig(container, hostConfig); err != nil {
            return job.Error(err)
        }
    }
    if err := container.Start(); err != nil {
        return job.Errorf("Cannot start container %s: %s", name, err)
    }
    return engine.StatusOK
}
Example 9: CmdInfo
func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
    images, _ := daemon.Graph().Map()
    var imgcount int
    if images == nil {
        imgcount = 0
    } else {
        imgcount = len(images)
    }
    kernelVersion := "<unknown>"
    if kv, err := kernel.GetKernelVersion(); err == nil {
        kernelVersion = kv.String()
    }
    operatingSystem := "<unknown>"
    if s, err := operatingsystem.GetOperatingSystem(); err == nil {
        operatingSystem = s
    }
    if inContainer, err := operatingsystem.IsContainerized(); err != nil {
        utils.Errorf("Could not determine if daemon is containerized: %v", err)
        operatingSystem += " (error determining if containerized)"
    } else if inContainer {
        operatingSystem += " (containerized)"
    }
    // if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION)
    initPath := utils.DockerInitPath("")
    if initPath == "" {
        // if that fails, we'll just return the path from the daemon
        initPath = daemon.SystemInitPath()
    }
    cjob := job.Eng.Job("subscribers_count")
    env, _ := cjob.Stdout.AddEnv()
    if err := cjob.Run(); err != nil {
        return job.Error(err)
    }
    v := &engine.Env{}
    v.SetInt("Containers", len(daemon.List()))
    v.SetInt("Images", imgcount)
    v.Set("Driver", daemon.GraphDriver().String())
    v.SetJson("DriverStatus", daemon.GraphDriver().Status())
    v.SetBool("MemoryLimit", daemon.SystemConfig().MemoryLimit)
    v.SetBool("SwapLimit", daemon.SystemConfig().SwapLimit)
    v.SetBool("IPv4Forwarding", !daemon.SystemConfig().IPv4ForwardingDisabled)
    v.SetBool("Debug", os.Getenv("DEBUG") != "")
    v.SetInt("NFd", utils.GetTotalUsedFds())
    v.SetInt("NGoroutines", runtime.NumGoroutine())
    v.Set("ExecutionDriver", daemon.ExecutionDriver().Name())
    v.SetInt("NEventsListener", env.GetInt("count"))
    v.Set("KernelVersion", kernelVersion)
    v.Set("OperatingSystem", operatingSystem)
    v.Set("IndexServerAddress", registry.IndexServerAddress())
    v.Set("InitSha1", dockerversion.INITSHA1)
    v.Set("InitPath", initPath)
    v.SetList("Sockets", daemon.Sockets)
    if _, err := v.WriteTo(job.Stdout); err != nil {
        return job.Error(err)
    }
    return engine.StatusOK
}
Example 10: ContainerStart
func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
    if len(job.Args) < 1 {
        return job.Errorf("Usage: %s container_id", job.Name)
    }
    var (
        name      = job.Args[0]
        daemon    = srv.daemon
        container = daemon.Get(name)
    )
    if container == nil {
        return job.Errorf("No such container: %s", name)
    }
    if container.State.IsRunning() {
        return job.Errorf("Container already started")
    }
    // If no environment was set, then no hostconfig was passed.
    if len(job.Environ()) > 0 {
        hostConfig := runconfig.ContainerHostConfigFromJob(job)
        if err := srv.setHostConfig(container, hostConfig); err != nil {
            return job.Error(err)
        }
    }
    if err := container.Start(); err != nil {
        return job.Errorf("Cannot start container %s: %s", name, err)
    }
    srv.LogEvent("start", container.ID, daemon.Repositories().ImageName(container.Image))
    return engine.StatusOK
}
Example 11: ContainerInspect
func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
    if len(job.Args) != 1 {
        return job.Errorf("usage: %s NAME", job.Name)
    }
    name := job.Args[0]
    if container := daemon.Get(name); container != nil {
        container.Lock()
        defer container.Unlock()
        if job.GetenvBool("raw") {
            b, err := json.Marshal(&struct {
                *Container
                HostConfig *runconfig.HostConfig
            }{container, container.hostConfig})
            if err != nil {
                return job.Error(err)
            }
            job.Stdout.Write(b)
            return engine.StatusOK
        }
        out := &engine.Env{}
        out.SetJson("Id", container.ID)
        out.SetAuto("Created", container.Created)
        out.SetJson("Path", container.Path)
        out.SetList("Args", container.Args)
        out.SetJson("Config", container.Config)
        out.SetJson("State", container.State)
        out.Set("Image", container.ImageID)
        out.SetJson("NetworkSettings", container.NetworkSettings)
        out.Set("ResolvConfPath", container.ResolvConfPath)
        out.Set("HostnamePath", container.HostnamePath)
        out.Set("HostsPath", container.HostsPath)
        out.SetJson("Name", container.Name)
        out.SetInt("RestartCount", container.RestartCount)
        out.Set("Driver", container.Driver)
        out.Set("ExecDriver", container.ExecDriver)
        out.Set("MountLabel", container.MountLabel)
        out.Set("ProcessLabel", container.ProcessLabel)
        out.SetJson("Volumes", container.Volumes)
        out.SetJson("VolumesRW", container.VolumesRW)
        out.SetJson("AppArmorProfile", container.AppArmorProfile)
        out.SetList("ExecIDs", container.GetExecIDs())
        if children, err := daemon.Children(container.Name); err == nil {
            for linkAlias, child := range children {
                container.hostConfig.Links = append(container.hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
            }
        }
        out.SetJson("HostConfig", container.hostConfig)
        container.hostConfig.Links = nil
        if _, err := out.WriteTo(job.Stdout); err != nil {
            return job.Error(err)
        }
        return engine.StatusOK
    }
    return job.Errorf("No such container: %s", name)
}
Example 12: ContainerRm
func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status {
    if len(job.Args) != 1 {
        return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
    }
    name := job.Args[0]
    removeVolume := job.GetenvBool("removeVolume")
    removeLink := job.GetenvBool("removeLink")
    forceRemove := job.GetenvBool("forceRemove")
    container := daemon.Get(name)
    if container == nil {
        return job.Errorf("No such container: %s", name)
    }
    if removeLink {
        name, err := GetFullContainerName(name)
        if err != nil {
            job.Error(err)
        }
        parent, n := path.Split(name)
        if parent == "/" {
            return job.Errorf("Conflict, cannot remove the default name of the container")
        }
        pe := daemon.ContainerGraph().Get(parent)
        if pe == nil {
            return job.Errorf("Cannot get parent %s for name %s", parent, name)
        }
        parentContainer := daemon.Get(pe.ID())
        if parentContainer != nil {
            parentContainer.DisableLink(n)
        }
        if err := daemon.ContainerGraph().Delete(name); err != nil {
            return job.Error(err)
        }
        return engine.StatusOK
    }
    if container != nil {
        if container.IsRunning() {
            if forceRemove {
                if err := container.Kill(); err != nil {
                    return job.Errorf("Could not kill running container, cannot remove - %v", err)
                }
            } else {
                return job.Errorf("Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f")
            }
        }
        if err := daemon.Destroy(container); err != nil {
            return job.Errorf("Cannot destroy container %s: %s", name, err)
        }
        container.LogEvent("destroy")
        if removeVolume {
            daemon.DeleteVolumes(container.VolumePaths())
        }
    }
    return engine.StatusOK
}
Example 13: Events
func (srv *Server) Events(job *engine.Job) engine.Status {
    if len(job.Args) != 0 {
        return job.Errorf("Usage: %s", job.Name)
    }
    var (
        since   = job.GetenvInt64("since")
        until   = job.GetenvInt64("until")
        timeout = time.NewTimer(time.Unix(until, 0).Sub(time.Now()))
    )
    // If no until, disable timeout
    if until == 0 {
        timeout.Stop()
    }
    listener := make(chan utils.JSONMessage)
    srv.eventPublisher.Subscribe(listener)
    defer srv.eventPublisher.Unsubscribe(listener)
    // When sending an event JSON serialization errors are ignored, but all
    // other errors lead to the eviction of the listener.
    sendEvent := func(event *utils.JSONMessage) error {
        if b, err := json.Marshal(event); err == nil {
            if _, err = job.Stdout.Write(b); err != nil {
                return err
            }
        }
        return nil
    }
    job.Stdout.Write(nil)
    // Resend every event in the [since, until] time interval.
    if since != 0 {
        for _, event := range srv.GetEvents() {
            if event.Time >= since && (event.Time <= until || until == 0) {
                if err := sendEvent(&event); err != nil {
                    return job.Error(err)
                }
            }
        }
    }
    for {
        select {
        case event, ok := <-listener:
            if !ok {
                return engine.StatusOK
            }
            if err := sendEvent(&event); err != nil {
                return job.Error(err)
            }
        case <-timeout.C:
            return engine.StatusOK
        }
    }
}
Example 14: ContainerCreate
func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
    var name string
    if len(job.Args) == 1 {
        name = job.Args[0]
    } else if len(job.Args) > 1 {
        return job.Errorf("Usage: %s", job.Name)
    }
    config := runconfig.ContainerConfigFromJob(job)
    if config.Memory != 0 && config.Memory < 4194304 {
        return job.Errorf("Minimum memory limit allowed is 4MB")
    }
    if config.Memory > 0 && !daemon.SystemConfig().MemoryLimit {
        job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
        config.Memory = 0
    }
    if config.Memory > 0 && !daemon.SystemConfig().SwapLimit {
        job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
        config.MemorySwap = -1
    }
    if config.Memory > 0 && config.MemorySwap > 0 && config.MemorySwap < config.Memory {
        return job.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.\n")
    }
    if config.Memory == 0 && config.MemorySwap > 0 {
        return job.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.\n")
    }
    var hostConfig *runconfig.HostConfig
    if job.EnvExists("HostConfig") {
        hostConfig = runconfig.ContainerHostConfigFromJob(job)
    } else {
        // Older versions of the API don't provide a HostConfig.
        hostConfig = nil
    }
    container, buildWarnings, err := daemon.Create(config, hostConfig, name)
    if err != nil {
        if daemon.Graph().IsNotExist(err) {
            _, tag := parsers.ParseRepositoryTag(config.Image)
            if tag == "" {
                tag = graph.DEFAULTTAG
            }
            return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
        }
        return job.Error(err)
    }
    if !container.Config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled {
        job.Errorf("IPv4 forwarding is disabled.\n")
    }
    container.LogEvent("create")
    job.Printf("%s\n", container.ID)
    for _, warning := range buildWarnings {
        job.Errorf("%s\n", warning)
    }
    return engine.StatusOK
}
Example 15: CmdPull
func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
    if n := len(job.Args); n != 1 && n != 2 {
        return job.Errorf("Usage: %s IMAGE [TAG]", job.Name)
    }
    var (
        localName   = job.Args[0]
        tag         string
        sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
        authConfig  = &registry.AuthConfig{}
        metaHeaders map[string][]string
    )
    if len(job.Args) > 1 {
        tag = job.Args[1]
    }
    job.GetenvJson("authConfig", authConfig)
    job.GetenvJson("metaHeaders", &metaHeaders)
    c, err := s.poolAdd("pull", localName+":"+tag)
    if err != nil {
        if c != nil {
            // Another pull of the same repository is already taking place; just wait for it to finish
            job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
            <-c
            return engine.StatusOK
        }
        return job.Error(err)
    }
    defer s.poolRemove("pull", localName+":"+tag)
    // Resolve the Repository name from fqn to endpoint + name
    hostname, remoteName, err := registry.ResolveRepositoryName(localName)
    if err != nil {
        return job.Error(err)
    }
    endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
    if err != nil {
        return job.Error(err)
    }
    r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
    if err != nil {
        return job.Error(err)
    }
    if endpoint == registry.IndexServerAddress() {
        // If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
        localName = remoteName
    }
    if err = s.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil {
        return job.Error(err)
    }
    return engine.StatusOK
}