This article collects typical usage examples of the Job.Logf method from the Golang package github.com/docker/docker/engine. If you have been wondering what Job.Logf does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage of the github.com/docker/docker/engine.Job type to which this method belongs.
The following shows 10 code examples of the Job.Logf method, sorted by popularity by default. Rating the examples you find useful helps the site surface better Golang code examples.
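Before the examples, here is a minimal sketch of where Job.Logf fits: an engine.Handler receives a *engine.Job, logs progress with Logf, and reports the outcome as an engine.Status. The handler name "hello", its behaviour, and the use of engine.New() as the Engine constructor are illustrative assumptions, not taken from the examples below.
package main

import (
	"github.com/docker/docker/engine"
)

// hello is a hypothetical handler used only to illustrate Job.Logf.
func hello(job *engine.Job) engine.Status {
	// Logf writes a formatted log line through the job's output.
	job.Logf("running %s with %d argument(s)", job.Name, len(job.Args))
	if len(job.Args) == 0 {
		return job.Errorf("Usage: %s NAME", job.Name)
	}
	job.Logf("hello, %s", job.Args[0])
	return engine.StatusOK
}

func main() {
	eng := engine.New() // assumed constructor for this engine version
	if err := eng.Register("hello", hello); err != nil {
		panic(err)
	}
	// Create and run the job, much as the daemon does when dispatching a command.
	job := eng.Job("hello", "world")
	if err := job.Run(); err != nil {
		panic(err)
	}
}
In the real daemon, handlers are registered in bulk and wired to subsystems, as the InitServer examples below show.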
Example 1: InitServer
// InitServer runs the remote api server `srv` as a daemon.
// Only one api server can run at the same time - this is enforced by a pidfile.
// The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup.
func InitServer(job *engine.Job) engine.Status {
	job.Logf("Creating server")
	cfg := daemonconfig.ConfigFromJob(job)
	srv, err := NewServer(job.Eng, cfg)
	if err != nil {
		return job.Error(err)
	}
	job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
	job.Eng.Hack_SetGlobalVar("httpapi.daemon", srv.daemon)
	for name, handler := range map[string]engine.Handler{
		"build": srv.Build,
	} {
		if err := job.Eng.Register(name, srv.handlerWrap(handler)); err != nil {
			return job.Error(err)
		}
	}
	// Install image-related commands from the image subsystem.
	// See `graph/service.go`
	if err := srv.daemon.Repositories().Install(job.Eng); err != nil {
		return job.Error(err)
	}
	// Install daemon-related commands from the daemon subsystem.
	// See `daemon/`
	if err := srv.daemon.Install(job.Eng); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
Example 2: InitPidfile
func InitPidfile(job *engine.Job) engine.Status {
	if len(job.Args) == 0 {
		return job.Error(fmt.Errorf("no pidfile provided to initialize"))
	}
	job.Logf("Creating pidfile")
	if err := utils.CreatePidFile(job.Args[0]); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
Example 3: portMapping
func portMapping(job *engine.Job, remoteHost string, localPort int, remotePort int) {
	localListener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", localPort))
	if err != nil {
		job.Errorf("\nnet.Listen failed: %v", err)
	}
	config := &ssh.ClientConfig{
		User: "root",
		Auth: []ssh.AuthMethod{
			ssh.PublicKeys(getPrivateKeys(job)),
		},
	}
	// Dial your ssh server.
	conn, err := ssh.Dial("tcp", fmt.Sprintf("%s:22", remoteHost), config)
	if err != nil {
		job.Errorf("\nUnable to connect: %s with %s", err, remoteHost)
	} else {
		job.Logf("\nEstablish ssh tunnel with %s:22 %d:%d", remoteHost, localPort, remotePort)
	}
	defer conn.Close()
	for {
		// Setup localConn (type net.Conn)
		localConnection, err := localListener.Accept()
		if err != nil {
			job.Errorf("\nListen.Accept failed: %v", err)
		}
		defer localConnection.Close()
		remoteConnection, err := conn.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", remotePort))
		if err != nil {
			job.Errorf("\nUnable to register tcp forward: %v", err)
		}
		defer remoteConnection.Close()
		go ioProxy(localConnection, remoteConnection)
	}
	// NOTE: as written, this deferred recover is never registered, because the
	// loop above never exits.
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("Recovered in f", r)
		}
	}()
}
Example 4: listContainer
func listContainer(job *engine.Job, configuration types.KraneConfiguration) <-chan *types.Ship {
	v := url.Values{}
	if all := job.GetenvBool("all"); all {
		v.Set("all", strconv.FormatBool(all))
	}

	ch := make(chan *types.Ship, len(configuration.Production.Fleet.Ships()))

	for _, ship := range configuration.Production.Fleet.Available() {
		go func(ship types.Ship) {
			cli := client.NewKraneClientApi(ship, false, job)
			body, statusCode, err := readBody(cli.Call("GET", "/containers/json?"+v.Encode(), nil, false))
			job.Logf("(%d) %s\n", statusCode, body)
			if err != nil {
				job.Logf("Error: %s", err.Error())
			}

			var resultShip types.Ship
			if (statusCode >= 200) && (statusCode < 300) {
				var containerList []types.Containers
				json.Unmarshal(body, &containerList)

				resultShip.Name = ship.Name
				resultShip.Fqdn = ship.Fqdn
				resultShip.Port = ship.Port
				resultShip.State = "operational"
				resultShip.Containers = containerList
				fmt.Printf("%#v", resultShip)
				ch <- &resultShip
			} else {
				ch <- nil
			}
		}(ship)
	}

	return ch
}
Example 5: InitServer
// InitServer runs the remote api server `srv` as a daemon.
// Only one api server can run at the same time - this is enforced by a pidfile.
// The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup.
func InitServer(job *engine.Job) engine.Status {
	job.Logf("Creating server")
	srv, err := NewServer(job.Eng, daemonconfig.ConfigFromJob(job))
	if err != nil {
		return job.Error(err)
	}
	job.Logf("Setting up signal traps")
	c := make(chan os.Signal, 1)
	signals := []os.Signal{os.Interrupt, syscall.SIGTERM}
	if os.Getenv("DEBUG") == "" {
		signals = append(signals, syscall.SIGQUIT)
	}
	gosignal.Notify(c, signals...)
	go func() {
		interruptCount := uint32(0)
		for sig := range c {
			go func(sig os.Signal) {
				log.Printf("Received signal '%v', starting shutdown of docker...\n", sig)
				switch sig {
				case os.Interrupt, syscall.SIGTERM:
					// If the user really wants to interrupt, let him do so.
					if atomic.LoadUint32(&interruptCount) < 3 {
						atomic.AddUint32(&interruptCount, 1)
						// Initiate the cleanup only once
						if atomic.LoadUint32(&interruptCount) == 1 {
							utils.RemovePidFile(srv.daemon.Config().Pidfile)
							srv.Close()
						} else {
							return
						}
					} else {
						log.Printf("Force shutdown of docker, interrupting cleanup\n")
					}
				case syscall.SIGQUIT:
				}
				os.Exit(128 + int(sig.(syscall.Signal)))
			}(sig)
		}
	}()
	job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
	job.Eng.Hack_SetGlobalVar("httpapi.daemon", srv.daemon)
	for name, handler := range map[string]engine.Handler{
		"build": srv.Build,
		"pull":  srv.ImagePull,
		"push":  srv.ImagePush,
	} {
		if err := job.Eng.Register(name, srv.handlerWrap(handler)); err != nil {
			return job.Error(err)
		}
	}
	// Install image-related commands from the image subsystem.
	// See `graph/service.go`
	if err := srv.daemon.Repositories().Install(job.Eng); err != nil {
		return job.Error(err)
	}
	// Install daemon-related commands from the daemon subsystem.
	// See `daemon/`
	if err := srv.daemon.Install(job.Eng); err != nil {
		return job.Error(err)
	}
	srv.SetRunning(true)
	return engine.StatusOK
}
Example 6: InitDriver
func InitDriver(job *engine.Job) engine.Status {
	var (
		network        *net.IPNet
		enableIPTables = job.GetenvBool("EnableIptables")
		icc            = job.GetenvBool("InterContainerCommunication")
		ipMasq         = job.GetenvBool("EnableIpMasq")
		ipForward      = job.GetenvBool("EnableIpForward")
		bridgeIP       = job.Getenv("BridgeIP")
		fixedCIDR      = job.Getenv("FixedCIDR")
	)

	if defaultIP := job.Getenv("DefaultBindingIP"); defaultIP != "" {
		defaultBindingIP = net.ParseIP(defaultIP)
	}

	bridgeIface = job.Getenv("BridgeIface")
	usingDefaultBridge := false
	if bridgeIface == "" {
		usingDefaultBridge = true
		bridgeIface = DefaultNetworkBridge
	}

	addr, err := networkdriver.GetIfaceAddr(bridgeIface)
	if err != nil {
		// If we're not using the default bridge, fail without trying to create it
		if !usingDefaultBridge {
			return job.Error(err)
		}
		// If the bridge interface is not found (or has no address), try to create it and/or add an address
		if err := configureBridge(bridgeIP); err != nil {
			return job.Error(err)
		}

		addr, err = networkdriver.GetIfaceAddr(bridgeIface)
		if err != nil {
			return job.Error(err)
		}
		network = addr.(*net.IPNet)
	} else {
		network = addr.(*net.IPNet)
		// validate that the bridge ip matches the ip specified by BridgeIP
		if bridgeIP != "" {
			bip, _, err := net.ParseCIDR(bridgeIP)
			if err != nil {
				return job.Error(err)
			}
			if !network.IP.Equal(bip) {
				return job.Errorf("bridge ip (%s) does not match existing bridge configuration %s", network.IP, bip)
			}
		}
	}

	// Configure iptables for link support
	if enableIPTables {
		if err := setupIPTables(addr, icc, ipMasq); err != nil {
			return job.Error(err)
		}
	}

	if ipForward {
		// Enable IPv4 forwarding
		if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil {
			job.Logf("WARNING: unable to enable IPv4 forwarding: %s\n", err)
		}
	}

	// We can always try removing the iptables
	if err := iptables.RemoveExistingChain("DOCKER"); err != nil {
		return job.Error(err)
	}

	if enableIPTables {
		chain, err := iptables.NewChain("DOCKER", bridgeIface)
		if err != nil {
			return job.Error(err)
		}
		portmapper.SetIptablesChain(chain)
	}

	bridgeNetwork = network
	if fixedCIDR != "" {
		_, subnet, err := net.ParseCIDR(fixedCIDR)
		if err != nil {
			return job.Error(err)
		}
		log.Debugf("Subnet: %v", subnet)
		if err := ipallocator.RegisterSubnet(bridgeNetwork, subnet); err != nil {
			return job.Error(err)
		}
	}

	// https://github.com/docker/docker/issues/2768
	job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeNetwork.IP)

	for name, f := range map[string]engine.Handler{
		"allocate_interface": Allocate,
		"release_interface":  Release,
		"allocate_port":      AllocatePort,
		"link":               LinkContainers,
	} {
//.........part of the code omitted here.........
Example 7: AllocatePort
// Allocate an external port and map it to the interface
func AllocatePort(job *engine.Job) engine.Status {
	var (
		err error

		ip            = defaultBindingIP
		id            = job.Args[0]
		hostIP        = job.Getenv("HostIP")
		hostPort      = job.GetenvInt("HostPort")
		containerPort = job.GetenvInt("ContainerPort")
		proto         = job.Getenv("Proto")
		network       = currentInterfaces.Get(id)
	)

	if hostIP != "" {
		ip = net.ParseIP(hostIP)
		if ip == nil {
			return job.Errorf("Bad parameter: invalid host ip %s", hostIP)
		}
	}

	// host ip, proto, and host port
	var container net.Addr
	switch proto {
	case "tcp":
		container = &net.TCPAddr{IP: network.IP, Port: containerPort}
	case "udp":
		container = &net.UDPAddr{IP: network.IP, Port: containerPort}
	default:
		return job.Errorf("unsupported address type %s", proto)
	}

	//
	// Try up to 10 times to get a port that's not already allocated.
	//
	// In the event of failure to bind, return the error that portmapper.Map
	// yields.
	//
	var host net.Addr
	for i := 0; i < MaxAllocatedPortAttempts; i++ {
		if host, err = portmapper.Map(container, ip, hostPort); err == nil {
			break
		}

		if allocerr, ok := err.(portallocator.ErrPortAlreadyAllocated); ok {
			// There is no point in immediately retrying to map an explicitly
			// chosen port.
			if hostPort != 0 {
				job.Logf("Failed to bind %s for container address %s: %s", allocerr.IPPort(), container.String(), allocerr.Error())
				break
			}

			// Automatically chosen 'free' port failed to bind: move on the next.
			job.Logf("Failed to bind %s for container address %s. Trying another port.", allocerr.IPPort(), container.String())
		} else {
			// some other error during mapping
			job.Logf("Received an unexpected error during port allocation: %s", err.Error())
			break
		}
	}

	if err != nil {
		return job.Error(err)
	}

	network.PortMappings = append(network.PortMappings, host)

	out := engine.Env{}
	switch netAddr := host.(type) {
	case *net.TCPAddr:
		out.Set("HostIP", netAddr.IP.String())
		out.SetInt("HostPort", netAddr.Port)
	case *net.UDPAddr:
		out.Set("HostIP", netAddr.IP.String())
		out.SetInt("HostPort", netAddr.Port)
	}
	if _, err := out.WriteTo(job.Stdout); err != nil {
		return job.Error(err)
	}

	return engine.StatusOK
}
Example 8: InitDriver
//.........part of the code omitted here.........
		for _, addrv6 := range addrsv6 {
			networkv6 = addrv6.(*net.IPNet)
			if networkv6.IP.Equal(bip6) {
				found = true
				break
			}
		}
		if !found {
			return job.Errorf("bridge IPv6 does not match existing bridge configuration %s", bip6)
		}
	}

	networkv4 = addrv4.(*net.IPNet)

	log.Infof("enableIPv6 = %t", enableIPv6)
	if enableIPv6 {
		if len(addrsv6) == 0 {
			return job.Error(errors.New("IPv6 enabled but no IPv6 detected"))
		}
		bridgeIPv6Addr = networkv6.IP
	}

	// Configure iptables for link support
	if enableIPTables {
		if err := setupIPTables(addrv4, icc, ipMasq); err != nil {
			return job.Error(err)
		}
	}

	if ipForward {
		// Enable IPv4 forwarding
		if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil {
			job.Logf("WARNING: unable to enable IPv4 forwarding: %s\n", err)
		}

		if fixedCIDRv6 != "" {
			// Enable IPv6 forwarding
			if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/default/forwarding", []byte{'1', '\n'}, 0644); err != nil {
				job.Logf("WARNING: unable to enable IPv6 default forwarding: %s\n", err)
			}
			if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/all/forwarding", []byte{'1', '\n'}, 0644); err != nil {
				job.Logf("WARNING: unable to enable IPv6 all forwarding: %s\n", err)
			}
		}
	}

	// We can always try removing the iptables
	if err := iptables.RemoveExistingChain("DOCKER", iptables.Nat); err != nil {
		return job.Error(err)
	}

	if enableIPTables {
		_, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Nat)
		if err != nil {
			return job.Error(err)
		}
		chain, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Filter)
		if err != nil {
			return job.Error(err)
		}
		portmapper.SetIptablesChain(chain)
	}

	bridgeIPv4Network = networkv4
if fixedCIDR != "" {
Example 9: InitServer
// InitServer runs the remote api server `srv` as a daemon.
// Only one api server can run at the same time - this is enforced by a pidfile.
// The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup.
func InitServer(job *engine.Job) engine.Status {
	job.Logf("Creating server")
	srv, err := NewServer(job.Eng, daemonconfig.ConfigFromJob(job))
	if err != nil {
		return job.Error(err)
	}
	job.Logf("Setting up signal traps")
	c := make(chan os.Signal, 1)
	signals := []os.Signal{os.Interrupt, syscall.SIGTERM}
	if os.Getenv("DEBUG") == "" {
		signals = append(signals, syscall.SIGQUIT)
	}
	gosignal.Notify(c, signals...)
	go func() {
		interruptCount := uint32(0)
		for sig := range c {
			go func(sig os.Signal) {
				log.Printf("Received signal '%v', starting shutdown of docker...\n", sig)
				switch sig {
				case os.Interrupt, syscall.SIGTERM:
					// If the user really wants to interrupt, let him do so.
					if atomic.LoadUint32(&interruptCount) < 3 {
						atomic.AddUint32(&interruptCount, 1)
						// Initiate the cleanup only once
						if atomic.LoadUint32(&interruptCount) == 1 {
							utils.RemovePidFile(srv.daemon.Config().Pidfile)
							srv.Close()
						} else {
							return
						}
					} else {
						log.Printf("Force shutdown of docker, interrupting cleanup\n")
					}
				case syscall.SIGQUIT:
				}
				os.Exit(128 + int(sig.(syscall.Signal)))
			}(sig)
		}
	}()
	job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
	job.Eng.Hack_SetGlobalVar("httpapi.daemon", srv.daemon)
	for name, handler := range map[string]engine.Handler{
		"export":           srv.ContainerExport,
		"create":           srv.ContainerCreate,
		"stop":             srv.ContainerStop,
		"restart":          srv.ContainerRestart,
		"start":            srv.ContainerStart,
		"kill":             srv.ContainerKill,
		"pause":            srv.ContainerPause,
		"unpause":          srv.ContainerUnpause,
		"wait":             srv.ContainerWait,
		"tag":              srv.ImageTag, // FIXME merge with "image_tag"
		"resize":           srv.ContainerResize,
		"commit":           srv.ContainerCommit,
		"info":             srv.DockerInfo,
		"container_delete": srv.ContainerDestroy,
		"image_export":     srv.ImageExport,
		"images":           srv.Images,
		"history":          srv.ImageHistory,
		"viz":              srv.ImagesViz,
		"container_copy":   srv.ContainerCopy,
		"attach":           srv.ContainerAttach,
		"logs":             srv.ContainerLogs,
		"changes":          srv.ContainerChanges,
		"top":              srv.ContainerTop,
		"load":             srv.ImageLoad,
		"build":            srv.Build,
		"pull":             srv.ImagePull,
		"import":           srv.ImageImport,
		"image_delete":     srv.ImageDelete,
		"events":           srv.Events,
		"push":             srv.ImagePush,
		"containers":       srv.Containers,
	} {
		if err := job.Eng.Register(name, srv.handlerWrap(handler)); err != nil {
			return job.Error(err)
		}
	}
	// Install image-related commands from the image subsystem.
	// See `graph/service.go`
	if err := srv.daemon.Repositories().Install(job.Eng); err != nil {
		return job.Error(err)
	}
	// Install daemon-related commands from the daemon subsystem.
	// See `daemon/`
	if err := srv.daemon.Install(job.Eng); err != nil {
		return job.Error(err)
	}
	srv.SetRunning(true)
	return engine.StatusOK
}
Example 10: Tunnel
func Tunnel(job *engine.Job) engine.Status {
	var fqdn string
	var delayed string

	if len(job.Args) == 2 {
		fqdn = job.Args[0]
		delayed = job.Args[1]
	} else if len(job.Args) > 2 {
		return job.Errorf("Usage: %s", job.Name)
	}

	configuration := job.Eng.Hack_GetGlobalVar("configuration").(types.KraneConfiguration)

	if delayed == "true" {
		job.Logf("\nWe are going to wait 30 seconds before creating the ssh tunnel with %s", fqdn)
		time.Sleep(30 * time.Second)

		fleet, err := configuration.Driver.List(nil)
		production := configuration.Production.Fleet.Find(fqdn)

		if fleet.Find(fqdn).Id == "" {
			job.Logf("Ship %s does not exist in cloud provider", fqdn)
			return engine.StatusOK
		} else if production.Id != "" && production.LocalPort > 0 {
			job.Logf("Tunnel with Ship %s already exists", fqdn)
			return engine.StatusOK
		}

		if err != nil {
			job.Logf("\nUnable to get list of ships from %s", configuration.Driver.Name())
		}

		configuration.Production.Fleet.Append(fleet.Ships())
		job.Eng.Hack_SetGlobalVar("configuration", configuration)
		fmt.Printf("%#v", configuration.Production.Fleet)
	}

	if configuration.Production.HighPort == 0 {
		configuration.Production.HighPort = 8000
		job.Eng.Hack_SetGlobalVar("configuration", configuration)
	}

	ship := configuration.Production.Fleet.Find(fqdn)

	if (ship.State == "operational") && (ship.LocalPort == 0) {
		job.Logf("\nCreating ssh tunnel for %s\n", fqdn)
		configuration.Production.HighPort = configuration.Production.HighPort + 1
		ship.LocalPort = configuration.Production.HighPort

		configuration.Production.Fleet.AppendShip(ship)
		job.Eng.Hack_SetGlobalVar("configuration", configuration)

		go portMapping(job, ship.Fqdn, ship.LocalPort, ship.Port)

		return engine.StatusOK
	} else {
		job.Logf("\nGoing to queue job to create tunnel for %s\n", fqdn)
		newjob := job.Eng.Job("ssh_tunnel", fqdn, "true")
		if delayed == "true" {
			newjob.Run()
		} else {
			go newjob.Run()
		}
		return engine.StatusOK
	}
}