This page collects typical usage examples of the Go function IsDebugEnabled from github.com/docker/docker/utils: what the function does, how it is called, and what real-world callers look like.
The 15 code examples below all exercise IsDebugEnabled and are drawn from the Docker codebase; by default they are ordered by popularity.
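The utils package itself is not reproduced on this page. Judging from how the examples call it, IsDebugEnabled, EnableDebug, and DisableDebug manage a single process-wide debug flag and keep the logrus log level in sync with it. Here is a minimal sketch of that API surface, written for this page rather than copied from Docker; the environment-variable mirroring in particular is an assumption:

package utils

import (
	"os"

	"github.com/Sirupsen/logrus"
)

// debug holds the process-wide debug state; the real package may
// synchronize access differently.
var debug string

// EnableDebug turns debug mode on: subsequent IsDebugEnabled calls
// return true and logrus emits Debugf output.
func EnableDebug() {
	debug = "1"
	os.Setenv("DEBUG", "1") // assumption: mirrored into the environment for child processes
	logrus.SetLevel(logrus.DebugLevel)
}

// DisableDebug turns debug mode off again.
func DisableDebug() {
	debug = ""
	os.Setenv("DEBUG", "")
	logrus.SetLevel(logrus.InfoLevel)
}

// IsDebugEnabled reports whether debug mode is currently on.
func IsDebugEnabled() bool {
	return debug != ""
}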
Example 1: initRouter
func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) {
	decoder := runconfig.ContainerDecoder{}

	routers := []router.Router{
		// we need to add the checkpoint router before the container router or the DELETE gets masked
		checkpointrouter.NewRouter(d, decoder),
		container.NewRouter(d, decoder),
		image.NewRouter(d, decoder),
		systemrouter.NewRouter(d, c),
		volume.NewRouter(d),
		build.NewRouter(dockerfile.NewBuildManager(d)),
		swarmrouter.NewRouter(c),
		pluginrouter.NewRouter(plugin.GetManager()),
	}

	if d.NetworkControllerEnabled() {
		routers = append(routers, network.NewRouter(d, c))
	}

	if d.HasExperimental() {
		for _, r := range routers {
			for _, route := range r.Routes() {
				if experimental, ok := route.(router.ExperimentalRoute); ok {
					experimental.Enable()
				}
			}
		}
	}

	s.InitRouter(utils.IsDebugEnabled(), routers...)
}
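The d.HasExperimental() loop above works because a route may optionally implement an extra interface, which the type assertion probes for. Docker's real definitions live in the router package and are not shown on this page; the sketch below reconstructs a plausible shape from how the examples use them, with Route's handler type simplified to http.Handler:

package router

import "net/http"

// Route describes one API endpoint. Sketch only: the real interface
// returns a richer handler type than http.Handler.
type Route interface {
	Method() string
	Path() string
	Handler() http.Handler
}

// ExperimentalRoute is the optional interface the type assertion in
// initRouter tests for: such routes are always registered but only
// serve requests once Enable has been called.
type ExperimentalRoute interface {
	Route
	Enable()
	Disable()
}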
Example 2: reloadConfig
func (cli *DaemonCli) reloadConfig() {
	reload := func(config *daemon.Config) {
		// Reload the authorization plugin
		cli.authzMiddleware.SetPlugins(config.AuthorizationPlugins)

		if err := cli.d.Reload(config); err != nil {
			logrus.Errorf("Error reconfiguring the daemon: %v", err)
			return
		}
		if config.IsValueSet("debug") {
			debugEnabled := utils.IsDebugEnabled()
			switch {
			case debugEnabled && !config.Debug: // disable debug
				utils.DisableDebug()
				cli.api.DisableProfiler()
			case config.Debug && !debugEnabled: // enable debug
				utils.EnableDebug()
				cli.api.EnableProfiler()
			}
		}
	}

	if err := daemon.ReloadConfiguration(*cli.configFile, cli.flags, reload); err != nil {
		logrus.Error(err)
	}
}
Example 3: initRouter
func initRouter(s *apiserver.Server, d *daemon.Daemon) {
	s.InitRouter(utils.IsDebugEnabled(),
		container.NewRouter(d),
		image.NewRouter(d),
		network.NewRouter(d),
		systemrouter.NewRouter(d),
		volume.NewRouter(d),
		build.NewRouter(dockerfile.NewBuildManager(d)))
}
Example 4: Reload
// Reload reads configuration changes and modifies the
// server according to those changes.
// Currently, only the --debug configuration is taken into account.
func (s *Server) Reload(config *daemon.Config) {
	debugEnabled := utils.IsDebugEnabled()
	switch {
	case debugEnabled && !config.Debug: // disable debug
		utils.DisableDebug()
		s.routerSwapper.Swap(s.createMux())
	case config.Debug && !debugEnabled: // enable debug
		utils.EnableDebug()
		s.routerSwapper.Swap(s.createMux())
	}
}
Example 5: Reload
// Reload reads configuration changes and modifies the
// server according to those changes.
// Currently, only the --debug configuration is taken into account.
func (s *Server) Reload(debug bool) {
	debugEnabled := utils.IsDebugEnabled()
	switch {
	case debugEnabled && !debug: // disable debug
		utils.DisableDebug()
		s.routerSwapper.Swap(s.createMux())
	case debug && !debugEnabled: // enable debug
		utils.EnableDebug()
		s.routerSwapper.Swap(s.createMux())
	}
}
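Both Reload variants rebuild the entire mux via createMux and swap it into the live server, so the profiler routes appear or disappear without restarting the listener. The swapper itself is not shown on this page; here is a minimal mutex-based sketch of such a component (field and method names follow the examples, the internals are assumptions):

package server

import (
	"net/http"
	"sync"

	"github.com/gorilla/mux"
)

// routerSwapper is an http.Handler whose underlying router can be
// replaced while the server keeps accepting requests.
type routerSwapper struct {
	mu     sync.Mutex
	router *mux.Router
}

// Swap installs a new router for all subsequent requests.
func (rs *routerSwapper) Swap(newRouter *mux.Router) {
	rs.mu.Lock()
	rs.router = newRouter
	rs.mu.Unlock()
}

// ServeHTTP snapshots the current router and delegates to it, so an
// in-flight request keeps the router it started with.
func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	rs.mu.Lock()
	router := rs.router
	rs.mu.Unlock()
	router.ServeHTTP(w, r)
}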
Example 6: initRouter
func initRouter(s *apiserver.Server, d *daemon.Daemon) {
	routers := []router.Router{
		container.NewRouter(d),
		image.NewRouter(d),
		systemrouter.NewRouter(d),
		volume.NewRouter(d),
		build.NewRouter(dockerfile.NewBuildManager(d)),
	}
	if d.NetworkControllerEnabled() {
		routers = append(routers, network.NewRouter(d))
	}

	s.InitRouter(utils.IsDebugEnabled(), routers...)
}
Example 7: initRouter
func initRouter(s *apiserver.Server, d *daemon.Daemon) {
	decoder := runconfig.ContainerDecoder{}

	routers := []router.Router{
		container.NewRouter(d, decoder),
		image.NewRouter(d, decoder),
		systemrouter.NewRouter(d),
		volume.NewRouter(d),
		build.NewRouter(d),
	}
	if d.NetworkControllerEnabled() {
		routers = append(routers, network.NewRouter(d))
	}

	s.InitRouter(utils.IsDebugEnabled(), routers...)
}
Example 8: initRouter
func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) {
	decoder := runconfig.ContainerDecoder{}

	routers := []router.Router{
		container.NewRouter(d, decoder),
		image.NewRouter(d, decoder),
		systemrouter.NewRouter(d, c),
		volume.NewRouter(d),
		build.NewRouter(dockerfile.NewBuildManager(d)),
		swarmrouter.NewRouter(c),
	}

	if d.NetworkControllerEnabled() {
		routers = append(routers, network.NewRouter(d, c))
	}

	routers = addExperimentalRouters(routers)

	s.InitRouter(utils.IsDebugEnabled(), routers...)
}
Example 9: createMux
// createMux initializes the main router the server uses.
func (s *Server) createMux() *mux.Router {
	m := mux.NewRouter()
	if utils.IsDebugEnabled() {
		profilerSetup(m, "/debug/")
	}

	logrus.Debugf("Registering routers")
	for _, apiRouter := range s.routers {
		for _, r := range apiRouter.Routes() {
			f := s.makeHTTPHandler(r.Handler())

			logrus.Debugf("Registering %s, %s", r.Method(), r.Path())
			m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f)
			m.Path(r.Path()).Methods(r.Method()).Handler(f)
		}
	}

	return m
}
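profilerSetup is what the debug flag buys you at the HTTP level: when IsDebugEnabled() returns true, Go's pprof endpoints are mounted under the given prefix. The helper is not reproduced on this page; a sketch built on the standard net/http/pprof handlers might look like the following (the exact endpoint list is an assumption):

package server

import (
	"net/http/pprof"

	"github.com/gorilla/mux"
)

// profilerSetup mounts the standard Go profiling endpoints under path,
// e.g. /debug/pprof/, /debug/pprof/profile and /debug/pprof/heap.
func profilerSetup(mainRouter *mux.Router, path string) {
	r := mainRouter.PathPrefix(path).Subrouter()
	r.HandleFunc("/pprof/", pprof.Index)
	r.HandleFunc("/pprof/cmdline", pprof.Cmdline)
	r.HandleFunc("/pprof/profile", pprof.Profile)
	r.HandleFunc("/pprof/symbol", pprof.Symbol)
	r.HandleFunc("/pprof/heap", pprof.Handler("heap").ServeHTTP)
	r.HandleFunc("/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP)
	r.HandleFunc("/pprof/block", pprof.Handler("block").ServeHTTP)
	r.HandleFunc("/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP)
}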
Example 10: initRouter
func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) {
	decoder := runconfig.ContainerDecoder{}

	routers := []router.Router{}

	// we need to add the checkpoint router before the container router or the DELETE gets masked
	routers = addExperimentalRouters(routers, d, decoder)

	routers = append(routers, []router.Router{
		container.NewRouter(d, decoder),
		image.NewRouter(d, decoder),
		systemrouter.NewRouter(d, c),
		volume.NewRouter(d),
		build.NewRouter(dockerfile.NewBuildManager(d)),
		swarmrouter.NewRouter(c),
	}...)

	if d.NetworkControllerEnabled() {
		routers = append(routers, network.NewRouter(d, c))
	}

	s.InitRouter(utils.IsDebugEnabled(), routers...)
}
Example 11: reloadConfig
func (cli *DaemonCli) reloadConfig() {
	reload := func(config *daemon.Config) {
		if err := cli.d.Reload(config); err != nil {
			logrus.Errorf("Error reconfiguring the daemon: %v", err)
			return
		}
		if config.IsValueSet("debug") {
			debugEnabled := utils.IsDebugEnabled()
			switch {
			case debugEnabled && !config.Debug: // disable debug
				utils.DisableDebug()
				cli.api.DisableProfiler()
			case config.Debug && !debugEnabled: // enable debug
				utils.EnableDebug()
				cli.api.EnableProfiler()
			}
		}
	}

	if err := daemon.ReloadConfiguration(*cli.configFile, flag.CommandLine, reload); err != nil {
		logrus.Error(err)
	}
}
Example 12: restore
func (daemon *Daemon) restore() error {
	var (
		debug         = utils.IsDebugEnabled()
		currentDriver = daemon.GraphDriverName()
		containers    = make(map[string]*container.Container)
	)

	if !debug {
		logrus.Info("Loading containers: start.")
	}
	dir, err := ioutil.ReadDir(daemon.repository)
	if err != nil {
		return err
	}

	containerCount := 0
	for _, v := range dir {
		id := v.Name()
		container, err := daemon.load(id)
		if !debug && logrus.GetLevel() == logrus.InfoLevel {
			fmt.Print(".")
			containerCount++
		}
		if err != nil {
			logrus.Errorf("Failed to load container %v: %v", id, err)
			continue
		}

		// Ignore the container if it does not support the current driver being used by the graph
		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
			rwlayer, err := daemon.layerStore.GetRWLayer(container.ID)
			if err != nil {
				logrus.Errorf("Failed to load container mount %v: %v", id, err)
				continue
			}
			container.RWLayer = rwlayer
			logrus.Debugf("Loaded container %v", container.ID)

			containers[container.ID] = container
		} else {
			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
		}
	}

	var migrateLegacyLinks bool
	restartContainers := make(map[*container.Container]chan struct{})
	activeSandboxes := make(map[string]interface{})
	for _, c := range containers {
		if err := daemon.registerName(c); err != nil {
			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
			continue
		}
		if err := daemon.Register(c); err != nil {
			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
			continue
		}

		// The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver.
		// We should rewrite it to use the daemon defaults.
		// Fixes https://github.com/docker/docker/issues/22536
		if c.HostConfig.LogConfig.Type == "" {
			if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
				logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
				continue
			}
		}
	}

	var wg sync.WaitGroup
	var mapLock sync.Mutex
	for _, c := range containers {
		wg.Add(1)
		go func(c *container.Container) {
			defer wg.Done()
			rm := c.RestartManager(false)
			if c.IsRunning() || c.IsPaused() {
				if err := daemon.containerd.Restore(c.ID, libcontainerd.WithRestartManager(rm)); err != nil {
					logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err)
					return
				}
				if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
					options, err := daemon.buildSandboxOptions(c)
					if err != nil {
						logrus.Warnf("Failed build sandbox option to restore container %s: %v", c.ID, err)
					}
					mapLock.Lock()
					activeSandboxes[c.NetworkSettings.SandboxID] = options
					mapLock.Unlock()
				}
			}
			// fixme: only if not running
			// get list of containers we need to restart
			if daemon.configStore.AutoRestart && !c.IsRunning() && !c.IsPaused() && c.ShouldRestart() {
				mapLock.Lock()
				restartContainers[c] = make(chan struct{})
				mapLock.Unlock()
			}

			if c.RemovalInProgress {
				// We probably crashed in the middle of a removal, reset
//......... (remainder of this example omitted) .........
Example 13: start
//......... (beginning of this example omitted) .........
		if err != nil {
			logrus.Fatal(err)
		}
		// If we're binding to a TCP port, make sure that a container doesn't try to use it.
		if proto == "tcp" {
			if err := allocateDaemonPort(addr); err != nil {
				logrus.Fatal(err)
			}
		}
		logrus.Debugf("Listener created for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1])
		api.Accept(protoAddrParts[1], l...)
	}

	if err := migrateKey(); err != nil {
		logrus.Fatal(err)
	}
	cli.TrustKeyPath = cli.commonFlags.TrustKey

	registryService := registry.NewService(cli.Config.ServiceOptions)
	containerdRemote, err := libcontainerd.New(cli.getLibcontainerdRoot(), cli.getPlatformRemoteOptions()...)
	if err != nil {
		logrus.Fatal(err)
	}

	d, err := daemon.NewDaemon(cli.Config, registryService, containerdRemote)
	if err != nil {
		if pfile != nil {
			if err := pfile.Remove(); err != nil {
				logrus.Error(err)
			}
		}
		logrus.Fatalf("Error starting daemon: %v", err)
	}

	logrus.Info("Daemon has completed initialization")

	logrus.WithFields(logrus.Fields{
		"version":     dockerversion.Version,
		"commit":      dockerversion.GitCommit,
		"graphdriver": d.GraphDriverName(),
	}).Info("Docker daemon")

	cli.initMiddlewares(api, serverConfig)
	initRouter(api, d)

	reload := func(config *daemon.Config) {
		if err := d.Reload(config); err != nil {
			logrus.Errorf("Error reconfiguring the daemon: %v", err)
			return
		}
		if config.IsValueSet("debug") {
			debugEnabled := utils.IsDebugEnabled()
			switch {
			case debugEnabled && !config.Debug: // disable debug
				utils.DisableDebug()
				api.DisableProfiler()
			case config.Debug && !debugEnabled: // enable debug
				utils.EnableDebug()
				api.EnableProfiler()
			}
		}
	}

	setupConfigReloadTrap(*cli.configFile, flags, reload)

	// The serve API routine never exits unless an error occurs
	// We need to start it as a goroutine and wait on it so
	// daemon doesn't exit
	serveAPIWait := make(chan error)
	go api.Wait(serveAPIWait)

	signal.Trap(func() {
		api.Close()
		<-serveAPIWait
		shutdownDaemon(d, 15)
		if pfile != nil {
			if err := pfile.Remove(); err != nil {
				logrus.Error(err)
			}
		}
	})

	// after the daemon is done setting up we can notify systemd api
	notifySystem()

	// Daemon is fully initialized and handling API traffic
	// Wait for serve API to complete
	errAPI := <-serveAPIWait
	shutdownDaemon(d, 15)
	containerdRemote.Cleanup()
	if errAPI != nil {
		if pfile != nil {
			if err := pfile.Remove(); err != nil {
				logrus.Error(err)
			}
		}
		logrus.Fatalf("Shutting down due to ServeAPI error: %v", errAPI)
	}
}
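setupConfigReloadTrap, called near the end of the example, is what ties the reload closure to a running daemon. Its body is not shown on this page; on Unix the conventional trigger is SIGHUP, and a hedged sketch of such a trap follows (the mflag import path and the exact signature are assumptions inferred from how the examples call ReloadConfiguration):

package main

import (
	"os"
	"os/signal"
	"syscall"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon"
	flag "github.com/docker/docker/pkg/mflag"
)

// setupConfigReloadTrap re-reads the configuration file and applies it
// through reload every time the process receives SIGHUP.
func setupConfigReloadTrap(configFile string, flags *flag.FlagSet, reload func(*daemon.Config)) {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP)
	go func() {
		for range c {
			if err := daemon.ReloadConfiguration(configFile, flags, reload); err != nil {
				logrus.Error(err)
			}
		}
	}()
}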
Example 14: SystemInfo
// SystemInfo returns information about the host server the daemon is running on.
func (daemon *Daemon) SystemInfo() (*types.Info, error) {
	kernelVersion := "<unknown>"
	if kv, err := kernel.GetKernelVersion(); err == nil {
		kernelVersion = kv.String()
	}

	operatingSystem := "<unknown>"
	if s, err := operatingsystem.GetOperatingSystem(); err == nil {
		operatingSystem = s
	}

	// Don't do containerized check on Windows
	if runtime.GOOS != "windows" {
		if inContainer, err := operatingsystem.IsContainerized(); err != nil {
			logrus.Errorf("Could not determine if daemon is containerized: %v", err)
			operatingSystem += " (error determining if containerized)"
		} else if inContainer {
			operatingSystem += " (containerized)"
		}
	}

	meminfo, err := system.ReadMemInfo()
	if err != nil {
		logrus.Errorf("Could not read system memory info: %v", err)
	}

	// if we still have the original dockerinit binary from before
	// we copied it locally, let's return the path to that, since
	// that's more intuitive (the copied path is trivial to derive
	// by hand given VERSION)
	initPath := utils.DockerInitPath("")
	sysInfo := sysinfo.New(true)

	var cRunning, cPaused, cStopped int
	for _, c := range daemon.List() {
		switch c.StateString() {
		case "paused":
			cPaused++
		case "running":
			cRunning++
		default:
			cStopped++
		}
	}

	v := &types.Info{
		ID:                 daemon.ID,
		Containers:         len(daemon.List()),
		ContainersRunning:  cRunning,
		ContainersPaused:   cPaused,
		ContainersStopped:  cStopped,
		Images:             len(daemon.imageStore.Map()),
		Driver:             daemon.GraphDriverName(),
		DriverStatus:       daemon.layerStore.DriverStatus(),
		Plugins:            daemon.showPluginsInfo(),
		IPv4Forwarding:     !sysInfo.IPv4ForwardingDisabled,
		BridgeNfIptables:   !sysInfo.BridgeNfCallIptablesDisabled,
		BridgeNfIP6tables:  !sysInfo.BridgeNfCallIP6tablesDisabled,
		Debug:              utils.IsDebugEnabled(),
		NFd:                fileutils.GetTotalUsedFds(),
		NGoroutines:        runtime.NumGoroutine(),
		SystemTime:         time.Now().Format(time.RFC3339Nano),
		ExecutionDriver:    daemon.ExecutionDriver().Name(),
		LoggingDriver:      daemon.defaultLogConfig.Type,
		NEventsListener:    daemon.EventsService.SubscribersCount(),
		KernelVersion:      kernelVersion,
		OperatingSystem:    operatingSystem,
		IndexServerAddress: registry.IndexServer,
		OSType:             platform.OSType,
		Architecture:       platform.Architecture,
		RegistryConfig:     daemon.RegistryService.Config,
		InitSha1:           dockerversion.InitSHA1,
		InitPath:           initPath,
		NCPU:               runtime.NumCPU(),
		MemTotal:           meminfo.MemTotal,
		DockerRootDir:      daemon.configStore.Root,
		Labels:             daemon.configStore.Labels,
		ExperimentalBuild:  utils.ExperimentalBuild(),
		ServerVersion:      dockerversion.Version,
		ClusterStore:       daemon.configStore.ClusterStore,
		ClusterAdvertise:   daemon.configStore.ClusterAdvertise,
		HTTPProxy:          getProxyEnv("http_proxy"),
		HTTPSProxy:         getProxyEnv("https_proxy"),
		NoProxy:            getProxyEnv("no_proxy"),
	}

	// TODO Windows. Refactor this more once sysinfo is refactored into
	// platform specific code. On Windows, sysinfo.cgroupMemInfo and
	// sysinfo.cgroupCpuInfo will be nil otherwise and cause a SIGSEGV if
	// an attempt is made to access through them.
	if runtime.GOOS != "windows" {
		v.MemoryLimit = sysInfo.MemoryLimit
		v.SwapLimit = sysInfo.SwapLimit
		v.OomKillDisable = sysInfo.OomKillDisable
		v.CPUCfsPeriod = sysInfo.CPUCfsPeriod
		v.CPUCfsQuota = sysInfo.CPUCfsQuota
		v.CPUShares = sysInfo.CPUShares
		v.CPUSet = sysInfo.Cpuset
	}
//......... (remainder of this example omitted) .........
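The Debug field populated from utils.IsDebugEnabled() is what surfaces as the daemon's debug mode in docker info. As a hypothetical client-side counterpart, the engine-api client of the same era could read it back like this (client construction and field names assumed):

package main

import (
	"fmt"

	"github.com/docker/engine-api/client"
	"golang.org/x/net/context"
)

func main() {
	// NewEnvClient configures the client from DOCKER_HOST and friends.
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	info, err := cli.Info(context.Background())
	if err != nil {
		panic(err)
	}

	// info.Debug mirrors the daemon-side utils.IsDebugEnabled() value.
	fmt.Println("daemon debug mode:", info.Debug)
}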
Example 15: restore
func (daemon *Daemon) restore() error {
	var (
		debug         = utils.IsDebugEnabled()
		currentDriver = daemon.GraphDriverName()
		containers    = make(map[string]*container.Container)
	)

	if !debug {
		logrus.Info("Loading containers: start.")
	}
	dir, err := ioutil.ReadDir(daemon.repository)
	if err != nil {
		return err
	}

	for _, v := range dir {
		id := v.Name()
		container, err := daemon.load(id)
		if !debug && logrus.GetLevel() == logrus.InfoLevel {
			fmt.Print(".")
		}
		if err != nil {
			logrus.Errorf("Failed to load container %v: %v", id, err)
			continue
		}

		// Ignore the container if it does not support the current driver being used by the graph
		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
			rwlayer, err := daemon.layerStore.GetRWLayer(container.ID)
			if err != nil {
				logrus.Errorf("Failed to load container mount %v: %v", id, err)
				continue
			}
			container.RWLayer = rwlayer
			logrus.Debugf("Loaded container %v", container.ID)

			containers[container.ID] = container
		} else {
			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
		}
	}

	var migrateLegacyLinks bool
	restartContainers := make(map[*container.Container]chan struct{})
	for _, c := range containers {
		if err := daemon.registerName(c); err != nil {
			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
			continue
		}
		if err := daemon.Register(c); err != nil {
			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
			continue
		}
	}

	var wg sync.WaitGroup
	var mapLock sync.Mutex
	for _, c := range containers {
		wg.Add(1)
		go func(c *container.Container) {
			defer wg.Done()
			rm := c.RestartManager(false)
			if c.IsRunning() || c.IsPaused() {
				// Fix activityCount such that graph mounts can be unmounted later
				if err := daemon.layerStore.ReinitRWLayer(c.RWLayer); err != nil {
					logrus.Errorf("Failed to ReinitRWLayer for %s due to %s", c.ID, err)
					return
				}
				if err := daemon.containerd.Restore(c.ID, libcontainerd.WithRestartManager(rm)); err != nil {
					logrus.Errorf("Failed to restore with containerd: %q", err)
					return
				}
			}
			// fixme: only if not running
			// get list of containers we need to restart
			if daemon.configStore.AutoRestart && !c.IsRunning() && !c.IsPaused() && c.ShouldRestart() {
				mapLock.Lock()
				restartContainers[c] = make(chan struct{})
				mapLock.Unlock()
			}

			// if c.hostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated
			if c.HostConfig != nil && c.HostConfig.Links == nil {
				migrateLegacyLinks = true
			}
		}(c)
	}
	wg.Wait()

	// migrate any legacy links from sqlite
	linkdbFile := filepath.Join(daemon.root, "linkgraph.db")
	var legacyLinkDB *graphdb.Database
	if migrateLegacyLinks {
		legacyLinkDB, err = graphdb.NewSqliteConn(linkdbFile)
		if err != nil {
			return fmt.Errorf("error connecting to legacy link graph DB %s, container links may be lost: %v", linkdbFile, err)
		}
		defer legacyLinkDB.Close()
	}

	// Now that all the containers are registered, register the links
//......... (remainder of this example omitted) .........