This article compiles typical usage of the Info function from the Golang package github.com/hyperhq/runv/lib/glog. If you are wondering what the Info function does, how to call it, or what it looks like in real code, the selected examples below may help.
Fifteen code examples of the Info function are shown below, sorted by popularity by default.
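Before the examples, here is a minimal, self-contained sketch of how a glog-style logger is typically driven. It assumes that runv's vendored lib/glog keeps the standard golang/glog API (Info, Infof, Warning, Error, V, Flush) and that flags are parsed before the first log call; the program below is an illustration, not code taken from runv itself.
package main

import (
	"flag"

	"github.com/hyperhq/runv/lib/glog"
)

func main() {
	// glog configures itself from command-line flags (-v, -logtostderr,
	// -log_dir, ...), so parse flags before the first log call.
	flag.Parse()
	// Flush any buffered log lines before the program exits.
	defer glog.Flush()

	glog.Info("plain informational message")       // arguments are joined fmt.Sprint-style
	glog.Infof("formatted message, pid=%d", 12345) // printf-style formatting
	glog.Warning("something looks suspicious")
	glog.Error("something went wrong")

	// Verbosity-guarded logging: these lines are only emitted when the
	// process is started with -v=1 (or -v=2) or higher.
	if glog.V(1) {
		glog.Info("verbose detail, guarded by glog.V(1)")
	}
	glog.V(2).Info("even more detail, only printed at -v=2 or above")
}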
Example 1: statePreparing
func statePreparing(ctx *VmContext, ev VmEvent) {
	switch ev.Event() {
	case EVENT_VM_EXIT, ERROR_INTERRUPTED:
		glog.Info("VM exited before start...")
	case COMMAND_SHUTDOWN, COMMAND_RELEASE:
		glog.Info("got shutdown or release command, not started yet")
		ctx.reportVmShutdown()
		ctx.Become(nil, "NONE")
	case COMMAND_EXEC:
		ctx.execCmd(ev.(*ExecCommand))
	case COMMAND_WINDOWSIZE:
		cmd := ev.(*WindowSizeCommand)
		ctx.setWindowSize(cmd.ClientTag, cmd.Size)
	case COMMAND_RUN_POD, COMMAND_REPLACE_POD:
		glog.Info("got spec, prepare devices")
		if ok := ctx.lazyPrepareDevice(ev.(*RunPodCommand)); ok {
			ctx.startSocks()
			ctx.DCtx.(LazyDriverContext).LazyLaunch(ctx)
			ctx.setTimeout(60)
			ctx.Become(stateStarting, "STARTING")
		} else {
			glog.Warning("Fail to prepare devices, quit")
			ctx.Become(nil, "None")
		}
	default:
		glog.Warning("got event during pod initiating")
	}
}
Example 2: waitInitReady
func waitInitReady(ctx *VmContext) {
	conn, err := UnixSocketConnect(ctx.HyperSockName)
	if err != nil {
		glog.Error("Cannot connect to hyper socket ", err.Error())
		ctx.Hub <- &InitFailedEvent{
			Reason: "Cannot connect to hyper socket " + err.Error(),
		}
		return
	}
	glog.Info("Waiting for init messages...")
	msg, err := readVmMessage(conn.(*net.UnixConn))
	if err != nil {
		glog.Error("read init message failed... ", err.Error())
		ctx.Hub <- &InitFailedEvent{
			Reason: "read init message failed... " + err.Error(),
		}
		conn.Close()
	} else if msg.code == INIT_READY {
		glog.Info("Get init ready message")
		ctx.Hub <- &InitConnectedEvent{conn: conn.(*net.UnixConn)}
		go waitCmdToInit(ctx, conn.(*net.UnixConn))
	} else {
		glog.Warningf("Get init message %d", msg.code)
		ctx.Hub <- &InitFailedEvent{
			Reason: fmt.Sprintf("Get init message %d", msg.code),
		}
		conn.Close()
	}
}
Example 3: InitDriver
func InitDriver() *XenDriver {
	if probeXend() {
		glog.Info("xend is running, can not start with xl.")
		return nil
	}
	ctx, res := HyperxlInitializeDriver()
	if res != 0 {
		glog.Info("failed to initialize xen context")
		return nil
	} else if ctx.Version < REQUIRED_VERSION {
		glog.Infof("Xen version is not new enough (%d), need 4.5 or higher", ctx.Version)
		return nil
	} else {
		glog.V(1).Info("Xen capabilities: ", ctx.Capabilities)
		hvm := false
		caps := strings.Split(ctx.Capabilities, " ")
		for _, cap := range caps {
			if strings.HasPrefix(cap, "hvm-") {
				hvm = true
				break
			}
		}
		if !hvm {
			glog.Infof("Xen installation does not support HVM, current capabilities: %s", ctx.Capabilities)
			return nil
		}
	}
	sigchan := make(chan os.Signal, 1)
	go func() {
		for {
			_, ok := <-sigchan
			if !ok {
				break
			}
			glog.V(1).Info("got SIGCHLD, send msg to libxl")
			HyperxlSigchldHandler(ctx.Ctx)
		}
	}()
	signal.Notify(sigchan, syscall.SIGCHLD)
	xd := &XenDriver{
		Ctx:          ctx.Ctx,
		Logger:       ctx.Logger,
		Version:      ctx.Version,
		Capabilities: ctx.Capabilities,
	}
	xd.domains = make(map[uint32]*hypervisor.VmContext)
	globalDriver = xd
	return globalDriver
}
Example 4: qmpCommander
func qmpCommander(handler chan QmpInteraction, conn *net.UnixConn, session *QmpSession, feedback chan QmpInteraction) {
	glog.V(1).Info("Begin process command session")
	for _, cmd := range session.commands {
		msg, err := json.Marshal(*cmd)
		if err != nil {
			handler <- qmpFail("cannot marshal command", session.callback)
			return
		}
		success := false
		var qe *QmpError = nil
		for repeat := 0; !success && repeat < 3; repeat++ {
			if len(cmd.Scm) > 0 {
				glog.V(1).Infof("send cmd with scm (%d bytes) (%d) %s", len(cmd.Scm), repeat+1, string(msg))
				f, _ := conn.File()
				fd := f.Fd()
				syscall.Sendmsg(int(fd), msg, cmd.Scm, nil, 0)
			} else {
				glog.V(1).Infof("sending command (%d) %s", repeat+1, string(msg))
				conn.Write(msg)
			}
			res, ok := <-feedback
			if !ok {
				glog.Info("QMP command result chan closed")
				return
			}
			switch res.MessageType() {
			case QMP_RESULT:
				success = true
				break //success
			case QMP_ERROR:
				glog.Warning("got one qmp error")
				qe = res.(*QmpError)
				time.Sleep(1000 * time.Millisecond)
			case QMP_INTERNAL_ERROR:
				glog.Info("QMP quit... commander quit... ")
				return
			}
		}
		if !success {
			handler <- qe.Finish(session.callback)
			return
		}
	}
	handler <- session.Finish()
	return
}
Example 5: waitConsoleOutput
func waitConsoleOutput(ctx *VmContext) {
	conn, err := UnixSocketConnect(ctx.ConsoleSockName)
	if err != nil {
		glog.Error("failed to connect to ", ctx.ConsoleSockName, " ", err.Error())
		return
	}
	glog.V(1).Info("connected to ", ctx.ConsoleSockName)
	tc, err := telnet.NewConn(conn)
	if err != nil {
		glog.Error("fail to init telnet connection to ", ctx.ConsoleSockName, ": ", err.Error())
		return
	}
	glog.V(1).Infof("connected %s as telnet mode.", ctx.ConsoleSockName)
	cout := make(chan string, 128)
	go TtyLiner(tc, cout)
	for {
		line, ok := <-cout
		if ok {
			glog.V(1).Info("[console] ", line)
		} else {
			glog.Info("console output end")
			break
		}
	}
}
Example 6: CmdPodRun
func (daemon *Daemon) CmdPodRun(job *engine.Job) error {
	// we can only support 1024 Pods
	if daemon.GetRunningPodNum() >= 1024 {
		return fmt.Errorf("Pod full, the maximum number of Pods is 1024!")
	}
	var autoremove bool = false
	podArgs := job.Args[0]
	if job.Args[1] == "yes" {
		autoremove = true
	}
	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))
	glog.Info(podArgs)
	var lazy bool = hypervisor.HDriver.SupportLazyMode()
	code, cause, err := daemon.StartPod(podId, podArgs, "", nil, lazy, autoremove, types.VM_KEEP_NONE)
	if err != nil {
		glog.Error(err.Error())
		return err
	}
	// Prepare the VM status to client
	v := &engine.Env{}
	v.Set("ID", podId)
	v.SetInt("Code", code)
	v.Set("Cause", cause)
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}
	return nil
}
Example 7: stateCleaning
func stateCleaning(ctx *VmContext, ev VmEvent) {
	if processed := commonStateHandler(ctx, ev, false); processed {
	} else if processed, success := deviceRemoveHandler(ctx, ev); processed {
		if !success {
			glog.Warning("fail to unplug devices for stop")
			ctx.poweroffVM(true, "fail to unplug devices")
			ctx.Become(stateDestroying, "DESTROYING")
		} else if ctx.deviceReady() {
			// ctx.reset()
			// ctx.unsetTimeout()
			// ctx.reportPodStopped()
			// glog.V(1).Info("device ready, could run pod.")
			// ctx.Become(stateInit, "INIT")
			ctx.vm <- &DecodedMessage{
				code:    INIT_READY,
				message: []byte{},
			}
			glog.V(1).Info("device ready, could run pod.")
		}
	} else if processed := initFailureHandler(ctx, ev); processed {
		ctx.poweroffVM(true, "fail to unplug devices")
		ctx.Become(stateDestroying, "DESTROYING")
	} else {
		switch ev.Event() {
		case COMMAND_RELEASE:
			glog.Info("vm cleaning to idle, got release, quit")
			ctx.reportVmShutdown()
			ctx.Become(stateDestroying, "DESTROYING")
		case EVENT_VM_TIMEOUT:
			glog.Warning("VM did not exit in time, try to stop it")
			ctx.poweroffVM(true, "pod stop/unplug timeout")
			ctx.Become(stateDestroying, "DESTROYING")
		case COMMAND_ACK:
			ack := ev.(*CommandAck)
			glog.V(1).Infof("[cleaning] Got reply to %d: '%s'", ack.reply, string(ack.msg))
			if ack.reply == INIT_READY {
				ctx.reset()
				ctx.unsetTimeout()
				ctx.reportPodStopped()
				glog.Info("init has been acknowledged, could run pod.")
				ctx.Become(stateInit, "INIT")
			}
		default:
			glog.V(1).Info("got event message while cleaning")
		}
	}
}
Example 8: InitDeviceContext
// InitDeviceContext will init device info in context
func (ctx *VmContext) InitDeviceContext(spec *pod.UserPod, wg *sync.WaitGroup,
	cInfo []*ContainerInfo, vInfo []*VolumeInfo) {
	ctx.lock.Lock()
	defer ctx.lock.Unlock()
	for i := 0; i < ctx.InterfaceCount; i++ {
		ctx.progress.adding.networks[i] = true
	}
	if cInfo == nil {
		cInfo = []*ContainerInfo{}
	}
	if vInfo == nil {
		vInfo = []*VolumeInfo{}
	}
	ctx.initVolumeMap(spec)
	if glog.V(3) {
		for i, c := range cInfo {
			glog.Infof("#%d Container Info:", i)
			b, err := json.MarshalIndent(c, "...|", " ")
			if err == nil {
				glog.Info("\n", string(b))
			}
		}
	}
	containers := make([]VmContainer, len(spec.Containers))
	for i, container := range spec.Containers {
		ctx.initContainerInfo(i, &containers[i], &container)
		ctx.setContainerInfo(i, &containers[i], cInfo[i])
		if spec.Tty {
			containers[i].Tty = ctx.attachId
			ctx.attachId++
			ctx.ptys.ttys[containers[i].Tty] = newAttachments(i, true)
		}
	}
	ctx.vmSpec = &VmPod{
		Hostname:   spec.Name,
		Containers: containers,
		Interfaces: nil,
		Routes:     nil,
		ShareDir:   ShareDirTag,
	}
	for _, vol := range vInfo {
		ctx.setVolumeInfo(vol)
	}
	ctx.userSpec = spec
	ctx.wg = wg
}
Example 9: stateDestroying
func stateDestroying(ctx *VmContext, ev VmEvent) {
	if processed, _ := deviceRemoveHandler(ctx, ev); processed {
		if closed := ctx.tryClose(); closed {
			glog.Info("resources reclaimed, quit...")
		}
	} else {
		switch ev.Event() {
		case EVENT_VM_EXIT:
			glog.Info("Got VM shutdown event")
			ctx.unsetTimeout()
			if closed := ctx.onVmExit(false); closed {
				glog.Info("VM Context closed.")
			}
		case EVENT_VM_KILL:
			glog.Info("Got VM force killed message")
			ctx.unsetTimeout()
			if closed := ctx.onVmExit(true); closed {
				glog.Info("VM Context closed.")
			}
		case ERROR_INTERRUPTED:
			glog.V(1).Info("Connection interrupted while destroying")
		case COMMAND_RELEASE:
			glog.Info("vm destroying, got release")
			ctx.reportVmShutdown()
		case EVENT_VM_TIMEOUT:
			glog.Info("Device removing timeout")
			ctx.Close()
		default:
			glog.Warning("got event during vm cleaning up")
		}
	}
}
Example 10: Kill
func (qc *QemuContext) Kill(ctx *hypervisor.VmContext) {
	defer func() {
		err := recover()
		if glog.V(1) && err != nil {
			glog.Info("kill qemu, but channel has already been closed")
		}
	}()
	qc.wdt <- "kill"
}
Example 11: commonStateHandler
// state machine
func commonStateHandler(ctx *VmContext, ev VmEvent, hasPod bool) bool {
	processed := true
	switch ev.Event() {
	case EVENT_VM_EXIT:
		glog.Info("Got VM shutdown event, go to cleaning up")
		ctx.unsetTimeout()
		if closed := ctx.onVmExit(hasPod); !closed {
			ctx.Become(stateDestroying, "DESTROYING")
		}
	case ERROR_INTERRUPTED:
		glog.Info("Connection interrupted, quit...")
		ctx.exitVM(true, "connection to VM broken", false, false)
		ctx.onVmExit(hasPod)
	case COMMAND_SHUTDOWN:
		glog.Info("got shutdown command, shutting down")
		ctx.exitVM(false, "", hasPod, ev.(*ShutdownCommand).Wait)
	default:
		processed = false
	}
	return processed
}
Example 12: stateTerminating
func stateTerminating(ctx *VmContext, ev VmEvent) {
	switch ev.Event() {
	case EVENT_VM_EXIT:
		glog.Info("Got VM shutdown event while terminating, go to cleaning up")
		ctx.unsetTimeout()
		if closed := ctx.onVmExit(true); !closed {
			ctx.Become(stateDestroying, "DESTROYING")
		}
	case EVENT_VM_KILL:
		glog.Info("Got VM force killed message, go to cleaning up")
		ctx.unsetTimeout()
		if closed := ctx.onVmExit(true); !closed {
			ctx.Become(stateDestroying, "DESTROYING")
		}
	case COMMAND_RELEASE:
		glog.Info("vm terminating, got release")
		ctx.reportVmShutdown()
	case COMMAND_ACK:
		ack := ev.(*CommandAck)
		glog.V(1).Infof("[Terminating] Got reply to %d: '%s'", ack.reply, string(ack.msg))
		if ack.reply == INIT_DESTROYPOD {
			glog.Info("POD destroyed ", string(ack.msg))
			ctx.poweroffVM(false, "")
		}
	case ERROR_CMD_FAIL:
		ack := ev.(*CommandError)
		if ack.context.code == INIT_DESTROYPOD {
			glog.Warning("Destroy pod failed")
			ctx.poweroffVM(true, "Destroy pod failed")
		}
	case EVENT_VM_TIMEOUT:
		glog.Warning("VM did not exit in time, try to stop it")
		ctx.poweroffVM(true, "vm terminating timeout")
	case ERROR_INTERRUPTED:
		glog.V(1).Info("Connection interrupted while terminating")
	default:
		glog.V(1).Info("got event during terminating")
	}
}
Example 13: ptyConnect
func (pts *pseudoTtys) ptyConnect(ctx *VmContext, container int, session uint64, tty *TtyIO) {
	pts.lock.Lock()
	if ta, ok := pts.ttys[session]; ok {
		ta.attach(tty)
	} else {
		pts.ttys[session] = newAttachmentsWithTty(container, false, tty)
	}
	pts.lock.Unlock()
	if tty.Stdin != nil {
		go func() {
			buf := make([]byte, 32)
			defer pts.Detach(ctx, session, tty)
			defer func() { recover() }()
			for {
				nr, err := tty.Stdin.Read(buf)
				if err != nil {
					glog.Info("a stdin closed, ", err.Error())
					return
				} else if nr == 1 && buf[0] == ExitChar {
					glog.Info("got stdin detach char, exit term")
					return
				}
				glog.V(3).Infof("trying to input char: %d and %d chars", buf[0], nr)
				mbuf := make([]byte, nr)
				copy(mbuf, buf[:nr])
				pts.channel <- &ttyMessage{
					session: session,
					message: mbuf[:nr],
				}
			}
		}()
	}
	return
}
Example 14: stateInit
func stateInit(ctx *VmContext, ev VmEvent) {
	if processed := commonStateHandler(ctx, ev, false); processed {
		//processed by common
	} else if processed := initFailureHandler(ctx, ev); processed {
		ctx.shutdownVM(true, "Fail during init environment")
		ctx.Become(stateDestroying, "DESTROYING")
	} else {
		switch ev.Event() {
		case EVENT_VM_START_FAILED:
			glog.Error("VM did not start up properly, go to cleaning up")
			ctx.reportVmFault("VM did not start up properly, go to cleaning up")
			ctx.Close()
		case EVENT_INIT_CONNECTED:
			glog.Info("begin to wait vm commands")
			ctx.reportVmRun()
		case COMMAND_RELEASE:
			glog.Info("no pod on vm, got release, quit.")
			ctx.shutdownVM(false, "")
			ctx.Become(stateDestroying, "DESTROYING")
			ctx.reportVmShutdown()
		case COMMAND_EXEC:
			ctx.execCmd(ev.(*ExecCommand))
		case COMMAND_WINDOWSIZE:
			cmd := ev.(*WindowSizeCommand)
			ctx.setWindowSize(cmd.ClientTag, cmd.Size)
		case COMMAND_RUN_POD, COMMAND_REPLACE_POD:
			glog.Info("got spec, prepare devices")
			if ok := ctx.prepareDevice(ev.(*RunPodCommand)); ok {
				ctx.setTimeout(60)
				ctx.Become(stateStarting, "STARTING")
			}
		case COMMAND_GET_POD_IP:
			ctx.reportPodIP()
		default:
			glog.Warning("got event during pod initiating")
		}
	}
}
Example 15: launchQemu
// launchQemu runs qemu and waits for it to quit
func launchQemu(qc *QemuContext, ctx *hypervisor.VmContext) {
	qemu := qc.driver.executable
	if qemu == "" {
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "can not find qemu executable"}
		return
	}
	args := qc.arguments(ctx)
	if glog.V(1) {
		glog.Info("cmdline arguments: ", strings.Join(args, " "))
	}
	pipe := make([]int, 2)
	err := syscall.Pipe(pipe)
	if err != nil {
		glog.Error("fail to create pipe")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "fail to create pipe"}
		return
	}
	err = daemon(qemu, append([]string{"qemu-system-x86_64"}, args...), pipe[1])
	if err != nil {
		//fail to daemonize
		glog.Error("try to start qemu failed")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "try to start qemu failed"}
		return
	}
	buf := make([]byte, 4)
	nr, err := syscall.Read(pipe[0], buf)
	if err != nil || nr != 4 {
		glog.Error("try to start qemu failed")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "try to start qemu failed"}
		return
	}
	syscall.Close(pipe[1])
	syscall.Close(pipe[0])
	pid := binary.BigEndian.Uint32(buf[:nr])
	glog.V(1).Infof("starting daemon with pid: %d", pid)
	err = ctx.DCtx.(*QemuContext).watchPid(int(pid), ctx.Hub)
	if err != nil {
		glog.Error("watch qemu process failed")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "watch qemu process failed"}
		return
	}
}