This page collects typical usage examples of the Golang Infof function from github.com/hyperhq/runv/lib/glog. If you have been wondering what Infof does, how to call it, or what idiomatic usage looks like, the curated examples below should help.
The following shows 15 code examples of the Infof function, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Golang samples.
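Before the individual examples, here is a minimal, self-contained sketch of how Infof is typically wired up. It assumes the runv fork keeps the standard golang/glog behavior it derives from, i.e. registering its flags (-v, -logtostderr, and so on) on the standard flag set and buffering output until Flush:

package main

import (
    "flag"

    "github.com/hyperhq/runv/lib/glog"
)

func main() {
    flag.Parse()       // glog reads its verbosity and output flags from the command line
    defer glog.Flush() // write any buffered log lines before exit

    glog.Infof("starting with %d workers", 4)
    if glog.V(2) {
        // guard verbose output behind a verbosity check, as several examples below do
        glog.Infof("verbose details: %v", []string{"a", "b"})
    }
}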
Example 1: Trap
// Trap sets up a simplified signal "trap", appropriate for common
// behavior expected from a vanilla unix command-line tool in general
// (and the Docker engine in particular).
//
// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated.
// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is
// skipped and the process is terminated immediately (allows force quit of stuck daemon)
// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit.
//
func Trap(cleanup func()) {
    c := make(chan os.Signal, 1)
    // we will handle INT, TERM, QUIT here
    signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT}
    gosignal.Notify(c, signals...)
    go func() {
        interruptCount := uint32(0)
        for sig := range c {
            go func(sig os.Signal) {
                glog.Infof("Processing signal '%v'", sig)
                switch sig {
                case os.Interrupt, syscall.SIGTERM:
                    if atomic.LoadUint32(&interruptCount) < 3 {
                        // Initiate the cleanup only once
                        if atomic.AddUint32(&interruptCount, 1) == 1 {
                            // Call the provided cleanup handler
                            cleanup()
                            os.Exit(0)
                        } else {
                            return
                        }
                    } else {
                        // 3 SIGTERM/INT signals received; force exit without cleanup
                        glog.Infof("Forcing docker daemon shutdown without cleanup; 3 interrupts received")
                    }
                case syscall.SIGQUIT:
                    DumpStacks()
                    glog.Infof("Forcing docker daemon shutdown without cleanup on SIGQUIT")
                }
                // for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal #
                os.Exit(128 + int(sig.(syscall.Signal)))
            }(sig)
        }
    }()
}
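For orientation, a hedged usage sketch (the cleanup body is hypothetical, not taken from the snippet): Trap is called once at startup from the same package, and the signal-handling goroutine then waits in the background while the process does its normal work.

// Hypothetical wiring in the same package as Trap:
func main() {
    Trap(func() {
        glog.Infof("cleaning up before shutdown")
        // stop listeners, sync state to disk, etc.
    })
    select {} // stand-in for the daemon's real work loop
}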
Example 2: CreateVolume
func CreateVolume(poolName, volName, dev_id string, size int, restore bool) error {
    glog.Infof("/dev/mapper/%s", volName)
    if _, err := os.Stat("/dev/mapper/" + volName); err == nil {
        // the device node already exists; nothing to do
        return nil
    }
    if !restore {
        parms := fmt.Sprintf("dmsetup message /dev/mapper/%s 0 \"create_thin %s\"", poolName, dev_id)
        if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
            glog.Error(string(res))
            // use "%s" so '%' bytes in dmsetup output are not treated as format verbs
            return fmt.Errorf("%s", string(res))
        }
    }
    // size is in bytes; device-mapper tables are expressed in 512-byte sectors
    parms := fmt.Sprintf("dmsetup create %s --table \"0 %d thin /dev/mapper/%s %s\"", volName, size/512, poolName, dev_id)
    if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
        glog.Error(string(res))
        return fmt.Errorf("%s", string(res))
    }
    if !restore {
        parms = fmt.Sprintf("mkfs.ext4 \"/dev/mapper/%s\"", volName)
        if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
            glog.Error(string(res))
            return fmt.Errorf("%s", string(res))
        }
    }
    return nil
}
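A hedged call-site sketch (the pool name, volume name, and device id below are invented for illustration). Note that size is passed in bytes and converted to 512-byte sectors inside the function:

// Hypothetical call: create and format a 1 GiB thin volume in pool "hyper-pool".
if err := CreateVolume("hyper-pool", "vol-abc123", "42", 1024*1024*1024, false); err != nil {
    glog.Error(err.Error())
}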
Example 3: VmMountLayer
func (d *Driver) VmMountLayer(id string) error {
    if d.daemon == nil {
        if err := d.Setup(); err != nil {
            return err
        }
    }
    var (
        diffSrc = fmt.Sprintf("%s/diff/%s", d.RootPath(), id)
        volDst  = fmt.Sprintf("%s/images/%s.vdi", d.RootPath(), id)
    )
    podstring, err := MakeMountPod("mac-vm-disk-mount-layer", "puller:latest", id, diffSrc, volDst)
    if err != nil {
        return err
    }
    podId := fmt.Sprintf("pull-%s", utils.RandStr(10, "alpha"))
    vm, ok := d.daemon.VmList[d.pullVm]
    if !ok {
        return fmt.Errorf("can not find VM(%s)", d.pullVm)
    }
    if vm.Status == types.S_VM_IDLE {
        code, cause, err := d.daemon.StartPod(podId, podstring, d.pullVm, nil, false, true, types.VM_KEEP_AFTER_SHUTDOWN)
        if err != nil {
            glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
            d.daemon.KillVm(d.pullVm)
            return err
        }
        vm := d.daemon.VmList[d.pullVm]
        // wait for cmd finish
        _, _, ret3, err := vm.GetVmChan()
        if err != nil {
            glog.Error(err.Error())
            return err
        }
        subVmStatus := ret3.(chan *types.VmResponse)
        var vmResponse *types.VmResponse
        for {
            vmResponse = <-subVmStatus
            if vmResponse.VmId == d.pullVm {
                if vmResponse.Code == types.E_POD_FINISHED {
                    glog.Infof("Got E_POD_FINISHED code response")
                    break
                }
            }
        }
        d.daemon.PodList[podId].Vm = d.pullVm
        // release pod from VM
        code, cause, err = d.daemon.StopPod(podId, "no")
        if err != nil {
            glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
            d.daemon.KillVm(d.pullVm)
            return err
        }
        d.daemon.CleanPod(podId)
    } else {
        glog.Errorf("pull vm should not be associated")
    }
    return nil
}
Example 4: DumpStacks
func DumpStacks() {
    buf := make([]byte, 16384)
    buf = buf[:runtime.Stack(buf, true)]
    // Note that if the daemon is started with a less-verbose log-level than "info" (the default),
    // the goroutine traces won't show up in the log.
    glog.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf)
}
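A common companion pattern (an assumption here, not part of the snippet) is to trigger DumpStacks from a dedicated signal, so a wedged daemon can be inspected without killing it. The imports match those of Example 1:

// Hypothetical same-package wiring: dump goroutine stacks on SIGUSR1.
c := make(chan os.Signal, 1)
gosignal.Notify(c, syscall.SIGUSR1)
go func() {
    for range c {
        DumpStacks()
    }
}()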
Example 5: CmdPodStart
func (daemon *Daemon) CmdPodStart(job *engine.Job) error {
    // we can only support 1024 Pods
    if daemon.GetRunningPodNum() >= 1024 {
        return fmt.Errorf("Pod full, the maximum Pod is 1024!")
    }
    podId := job.Args[0]
    vmId := job.Args[1]
    glog.Infof("pod:%s, vm:%s", podId, vmId)
    // Do the status check for the given pod
    if _, ok := daemon.PodList[podId]; !ok {
        return fmt.Errorf("The pod(%s) can not be found, please create it first", podId)
    }
    var lazy bool = hypervisor.HDriver.SupportLazyMode() && vmId == ""
    code, cause, err := daemon.StartPod(podId, "", vmId, nil, lazy, false, types.VM_KEEP_NONE)
    if err != nil {
        glog.Error(err.Error())
        return err
    }
    // Prepare the VM status to client
    v := &engine.Env{}
    v.Set("ID", vmId)
    v.SetInt("Code", code)
    v.Set("Cause", cause)
    if _, err := v.WriteTo(job.Stdout); err != nil {
        return err
    }
    return nil
}
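For orientation, a hedged sketch of driving this handler through the job engine, modeled on Docker's engine package that this code descends from; the registration name "podStart" is a guess, not confirmed by the snippet:

// Hypothetical invocation; eng is an *engine.Engine with CmdPodStart registered.
job := eng.Job("podStart", podId, vmId)
job.Stdout.Add(os.Stdout) // CmdPodStart writes its Env response here
if err := job.Run(); err != nil {
    glog.Error(err.Error())
}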
Example 6: diff
func diff(id, parent string) (diff archive.Archive, err error) {
    // create pod
    // start or replace pod
    glog.Infof("Diff between %s and %s", id, parent)
    layerFs := "/tmp/test1"
    if parent == "" {
        // note: the local variable shadows the archive package inside this block
        archive, err := archive.Tar(layerFs, archive.Uncompressed)
        if err != nil {
            return nil, err
        }
        return ioutils.NewReadCloserWrapper(archive, func() error {
            err := archive.Close()
            return err
        }), nil
    }
    parentFs := "/tmp/test2"
    changes, err := archive.ChangesDirs(layerFs, parentFs)
    if err != nil {
        return nil, err
    }
    archive, err := archive.ExportChanges(layerFs, changes)
    if err != nil {
        return nil, err
    }
    return ioutils.NewReadCloserWrapper(archive, func() error {
        err := archive.Close()
        return err
    }), nil
}
Example 7: InitDeviceContext
// InitDeviceContext initializes the device info in the context
func (ctx *VmContext) InitDeviceContext(spec *pod.UserPod, wg *sync.WaitGroup,
    cInfo []*ContainerInfo, vInfo []*VolumeInfo) {
    ctx.lock.Lock()
    defer ctx.lock.Unlock()
    for i := 0; i < ctx.InterfaceCount; i++ {
        ctx.progress.adding.networks[i] = true
    }
    if cInfo == nil {
        cInfo = []*ContainerInfo{}
    }
    if vInfo == nil {
        vInfo = []*VolumeInfo{}
    }
    ctx.initVolumeMap(spec)
    if glog.V(3) {
        for i, c := range cInfo {
            glog.Infof("#%d Container Info:", i)
            b, err := json.MarshalIndent(c, "...|", " ")
            if err == nil {
                glog.Info("\n", string(b))
            }
        }
    }
    containers := make([]VmContainer, len(spec.Containers))
    for i, container := range spec.Containers {
        ctx.initContainerInfo(i, &containers[i], &container)
        ctx.setContainerInfo(i, &containers[i], cInfo[i])
        if spec.Tty {
            containers[i].Tty = ctx.attachId
            ctx.attachId++
            ctx.ptys.ttys[containers[i].Tty] = newAttachments(i, true)
        }
    }
    ctx.vmSpec = &VmPod{
        Hostname:   spec.Name,
        Containers: containers,
        Interfaces: nil,
        Routes:     nil,
        ShareDir:   ShareDirTag,
    }
    for _, vol := range vInfo {
        ctx.setVolumeInfo(vol)
    }
    ctx.userSpec = spec
    ctx.wg = wg
}
Example 8: DomainDeath_cgo
//export DomainDeath_cgo
func DomainDeath_cgo(domid C.uint32_t) {
    defer func() { recover() }() // in case the vmContext or channel has been released
    dom := (uint32)(domid)
    glog.Infof("got xen hypervisor message: domain %d quit", dom)
    if vm, ok := globalDriver.domains[dom]; ok {
        glog.V(1).Infof("Domain %d managed by xen driver, try to close it", dom) // was missing the dom argument
        delete(globalDriver.domains, dom)
        vm.Hub <- &hypervisor.VmExit{}
        HyperDomainCleanup(globalDriver.Ctx, vm.DCtx.(*XenContext).ev)
    }
}
Example 9: StartVm
func (daemon *Daemon) StartVm(vmId string, cpu, mem int, lazy bool, keep int) (*hypervisor.Vm, error) {
    b := &hypervisor.BootConfig{
        CPU:    cpu,
        Memory: mem,
        Kernel: daemon.Kernel,
        Initrd: daemon.Initrd,
        Bios:   daemon.Bios,
        Cbfs:   daemon.Cbfs,
        Vbox:   daemon.VboxImage,
    }
    vm := daemon.NewVm(vmId, cpu, mem, lazy, keep)
    err := vm.Launch(b)
    if err != nil {
        return nil, err
    }
    _, r1, r2, err1 := vm.GetVmChan()
    if err1 != nil {
        return nil, err1
    }
    vmStatus := r1.(chan *types.VmResponse)
    subVmStatus := r2.(chan *types.VmResponse)
    // relay VM responses to the subscriber channel
    go func(interface{}) {
        defer func() {
            err := recover()
            if err != nil {
                glog.Warning("panic during send shutdown message to channel")
            }
        }()
        for {
            vmResponse := <-vmStatus
            subVmStatus <- vmResponse
        }
    }(subVmStatus)
    var vmResponse *types.VmResponse
    for {
        vmResponse = <-subVmStatus
        glog.V(1).Infof("Get the response from VM, VM id is %s, response code is %d!", vmResponse.VmId, vmResponse.Code)
        if vmResponse.VmId == vmId {
            if vmResponse.Code == types.E_VM_RUNNING {
                glog.Infof("Got E_VM_RUNNING code response")
                break
            } else {
                break
            }
        }
    }
    if vmResponse.Code != types.E_VM_RUNNING {
        return nil, fmt.Errorf("Vbox does not start successfully")
    }
    return vm, nil
}
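A hedged call-site sketch (the VM id and sizing are invented; daemon is a *Daemon as in the snippet). The keep policy constant is the same one CmdPodStart passes above:

// Hypothetical call: boot a 1-vCPU, 128 MB VM and keep nothing after shutdown.
vmId := "vm-xyz789" // invented id
vm, err := daemon.StartVm(vmId, 1, 128, false, types.VM_KEEP_NONE)
if err != nil {
    return err
}
glog.Infof("VM %s is up and ready for pods", vmId)
_ = vm // keep a handle for later pod scheduling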
Example 10: Run
func (proxy *TCPProxy) Run() {
    quit := make(chan bool)
    defer close(quit)
    for {
        client, err := proxy.listener.Accept()
        if err != nil {
            glog.Infof("Stopping proxy on tcp/%v for tcp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err)
            return
        }
        go proxy.clientLoop(client.(*net.TCPConn), quit)
    }
}
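Run blocks in Accept until the listener fails, so the glog.Infof line doubles as the proxy's shutdown log. A hedged same-package sketch of the usual stop sequence (listener is unexported, so this only works within the package):

go proxy.Run()         // serve connections in the background
// ... later, during shutdown:
proxy.listener.Close() // Accept returns an error; Run logs "Stopping proxy ..." and returns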
Example 11: VmAssociate
func VmAssociate(vmId string, hub chan VmEvent, client chan *types.VmResponse,
    wg *sync.WaitGroup, pack []byte) {
    if glog.V(1) {
        glog.Infof("VM %s trying to reload with serialized data: %s", vmId, string(pack))
    }
    pinfo, err := vmDeserialize(pack)
    if err != nil {
        client <- &types.VmResponse{
            VmId:  vmId,
            Code:  types.E_BAD_REQUEST,
            Cause: err.Error(),
        }
        return
    }
    if pinfo.Id != vmId {
        client <- &types.VmResponse{
            VmId:  vmId,
            Code:  types.E_BAD_REQUEST,
            Cause: "VM ID mismatch",
        }
        return
    }
    context, err := pinfo.vmContext(hub, client, wg)
    if err != nil {
        client <- &types.VmResponse{
            VmId:  vmId,
            Code:  types.E_BAD_REQUEST,
            Cause: err.Error(),
        }
        return
    }
    client <- &types.VmResponse{
        VmId: vmId,
        Code: types.E_OK,
    }
    context.DCtx.Associate(context)
    go waitPts(context)
    go connectToInit(context)
    if glog.V(1) {
        go waitConsoleOutput(context)
    }
    context.Become(stateRunning, "RUNNING")
    context.loop()
}
Example 12: maintainer
// MAINTAINER some text <[email protected]>
//
// Sets the maintainer metadata.
func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error {
    if len(args) != 1 {
        return fmt.Errorf("MAINTAINER requires exactly one argument")
    }
    if err := b.BuilderFlags.Parse(); err != nil {
        return err
    }
    b.maintainer = args[0]
    glog.Infof("MAINTAINER is %s", args[0])
    return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
}
Example 13: Run
func (proxy *UDPProxy) Run() {
    readBuf := make([]byte, UDPBufSize)
    for {
        read, from, err := proxy.listener.ReadFromUDP(readBuf)
        if err != nil {
            // NOTE: Apparently ReadFrom doesn't return
            // ECONNREFUSED like Read does (see comment in
            // UDPProxy.replyLoop)
            if !isClosedError(err) {
                glog.Infof("Stopping proxy on udp/%v for udp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err)
            }
            break
        }
        fromKey := newConnTrackKey(from)
        proxy.connTrackLock.Lock()
        proxyConn, hit := proxy.connTrackTable[*fromKey]
        if !hit {
            proxyConn, err = net.DialUDP("udp", nil, proxy.backendAddr)
            if err != nil {
                glog.Infof("Can't proxy a datagram to udp/%s: %s", proxy.backendAddr, err)
                proxy.connTrackLock.Unlock()
                continue
            }
            proxy.connTrackTable[*fromKey] = proxyConn
            go proxy.replyLoop(proxyConn, from, fromKey)
        }
        proxy.connTrackLock.Unlock()
        for i := 0; i != read; {
            written, err := proxyConn.Write(readBuf[i:read])
            if err != nil {
                glog.Infof("Can't proxy a datagram to udp/%s: %s", proxy.backendAddr, err)
                break
            }
            i += written
        }
    }
}
Example 14: register
// register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) register(container *Container, updateSuffixarray bool) error {
    if container.daemon != nil || daemon.Exists(container.ID) {
        return fmt.Errorf("Container is already loaded")
    }
    if err := validateID(container.ID); err != nil {
        return err
    }
    if err := daemon.ensureName(container); err != nil {
        return err
    }
    if daemon == nil {
        glog.Error("daemon can not be nil")
        return fmt.Errorf("daemon can not be nil")
    }
    container.daemon = daemon
    // Attach to stdout and stderr
    container.stderr = broadcastwriter.New()
    container.stdout = broadcastwriter.New()
    // Attach to stdin
    if container.Config.OpenStdin {
        container.stdin, container.stdinPipe = io.Pipe()
    } else {
        container.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
    }
    // done
    daemon.containers.Add(container.ID, container)
    // don't update the Suffixarray if we're starting up
    // we'll waste time if we update it for every container
    daemon.idIndex.Add(container.ID)
    if container.IsRunning() {
        glog.Infof("killing old running container %s", container.ID)
        container.SetStopped(&ExitStatus{ExitCode: 0})
        if err := container.Unmount(); err != nil {
            glog.V(1).Infof("unmount error %s", err)
        }
        if err := container.ToDisk(); err != nil {
            glog.V(1).Infof("saving stopped state to disk %s", err)
        }
    }
    return nil
}
Example 15: Read
func (r *resumableRequestReader) Read(p []byte) (n int, err error) {
    if r.client == nil || r.request == nil {
        return 0, fmt.Errorf("client and request can't be nil\n")
    }
    isFreshRequest := false
    if r.lastRange != 0 && r.currentResponse == nil {
        readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize)
        r.request.Header.Set("Range", readRange)
        time.Sleep(5 * time.Second)
    }
    if r.currentResponse == nil {
        r.currentResponse, err = r.client.Do(r.request)
        isFreshRequest = true
    }
    if err != nil && r.failures+1 != r.maxFailures {
        r.cleanUpResponse()
        r.failures++
        time.Sleep(5 * time.Duration(r.failures) * time.Second)
        return 0, nil
    } else if err != nil {
        r.cleanUpResponse()
        return 0, err
    }
    if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 {
        r.cleanUpResponse()
        return 0, io.EOF
    } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest {
        r.cleanUpResponse()
        return 0, fmt.Errorf("the server doesn't support byte ranges")
    }
    if r.totalSize == 0 {
        r.totalSize = r.currentResponse.ContentLength
    } else if r.totalSize <= 0 {
        r.cleanUpResponse()
        return 0, fmt.Errorf("failed to auto detect content length")
    }
    n, err = r.currentResponse.Body.Read(p)
    r.lastRange += int64(n)
    if err != nil {
        r.cleanUpResponse()
    }
    if err != nil && err != io.EOF {
        glog.Infof("encountered error during pull and clearing it before resume: %s", err)
        err = nil
    }
    return n, err
}
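To see where those fields come from, a hedged same-package construction sketch (the URL and failure budget are invented; the field names are exactly the ones Read uses above). Because Read re-issues the request with a Range header after a failure, a plain io.Copy gets resume behavior for free:

// Hypothetical use of the reader for a resumable download.
req, err := http.NewRequest("GET", "https://registry.example.com/layer.tar", nil)
if err != nil {
    return err
}
body := &resumableRequestReader{
    client:      http.DefaultClient,
    request:     req,
    maxFailures: 5,
}
n, err := io.Copy(ioutil.Discard, body)
glog.Infof("fetched %d bytes (err=%v)", n, err)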