This article collects typical usage examples of the Golang Connection function from github.com/openshift/geard/systemd. If you have been wondering what the Golang Connection function does, how it is used, and what it looks like in real code, the hand-picked examples below should help.
The following shows 12 code examples of the Connection function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code samples.
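All of the examples share the same basic pattern: obtain the shared systemd D-Bus connection with systemd.Connection() and issue unit jobs or property queries against it. As a quick orientation, here is a minimal sketch of that pattern; it assumes the connection has already been initialized by geard's daemon startup code, and the unit name "ctr-example.service" is purely illustrative.
package main

import (
	"log"

	"github.com/openshift/geard/systemd"
)

func main() {
	// Connection() returns the package-level systemd connection; it is
	// assumed to have been set up earlier by the surrounding daemon.
	conn := systemd.Connection()

	// "ctr-example.service" is a hypothetical unit name used only for illustration.
	if err := conn.StartUnitJob("ctr-example.service", "replace"); err != nil {
		log.Printf("could not queue start job: %v", err)
	}

	// Unit properties can be inspected the same way the examples below do.
	if props, err := conn.GetUnitProperties("ctr-example.service"); err == nil {
		log.Printf("ActiveState: %v", props["ActiveState"])
	}
}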
Example 1: disableAllUnits
func disableAllUnits() {
	systemd := systemd.Connection()
	for _, path := range []string{
		filepath.Join(config.ContainerBasePath(), "units"),
	} {
		filepath.Walk(path, func(p string, info os.FileInfo, err error) error {
			if os.IsNotExist(err) {
				return nil
			}
			if err != nil {
				log.Printf("init: Can't read %s: %v", p, err)
				return nil
			}
			if info.IsDir() {
				return nil
			}
			if !isSystemdFile(p) {
				return nil
			}
			fmt.Printf("Stopping and disabling %s\n", filepath.Base(p))
			if status, err := systemd.StopUnit(filepath.Base(p), "fail"); err != nil {
				log.Printf("init: Unable to stop %s: %v, %+v", p, status, err)
			}
			if _, err := systemd.DisableUnitFiles([]string{p}, false); err != nil {
				log.Printf("init: Unable to disable %s: %+v", p, err)
			}
			return nil
		})
		if err := systemd.Reload(); err != nil {
			log.Printf("init: systemd reload failed: %+v", err)
		}
	}
}
Example 2: waitStart
func waitStart(pChan <-chan netfilter.NFPacket, chanId uint16, waitChan chan<- uint16, hostIp string) {
	for {
		p := <-pChan
		port, err := portForPacket(p)
		if err != nil {
			fmt.Println(err)
			p.SetVerdict(netfilter.NF_ACCEPT)
			waitChan <- chanId
			continue
		}
		id, err := port.IdentifierFor()
		if err != nil {
			fmt.Println(err)
			p.SetVerdict(netfilter.NF_ACCEPT)
			waitChan <- chanId
			continue
		}
		cInfo, err := systemd.Connection().GetUnitProperties(id.UnitNameFor())
		if err != nil || cInfo["ActiveState"] != "active" {
			//TODO: Placeholder for container start detection
			fmt.Printf("[%v] Waiting for container %v to start\n", time.Now().Format(time.RFC3339), id)
			time.Sleep(time.Second * 5)
			fmt.Printf("[%v] Container %v started\n", time.Now().Format(time.RFC3339), id)
			iptables.UnidleContainer(id, hostIp)
		}
		p.SetVerdict(netfilter.NF_ACCEPT)
		waitChan <- chanId
	}
}
Example 3: Execute
func (j *StartedContainerStateRequest) Execute(resp jobs.Response) {
	unitName := j.Id.UnitNameFor()
	unitPath := j.Id.UnitPathFor()
	inState, tooSoon := inStateOrTooSoon(j.Id, unitName, true, false, rateLimitChanges)
	if inState {
		w := resp.SuccessWithWrite(jobs.ResponseAccepted, true, false)
		fmt.Fprintf(w, "Container %s starting\n", j.Id)
		return
	}
	if tooSoon {
		resp.Failure(ErrStartRequestThrottled)
		return
	}
	if errs := csystemd.SetUnitStartOnBoot(j.Id, true); errs != nil {
		log.Print("alter_container_state: Unable to persist whether the unit is started on boot: ", errs)
		resp.Failure(ErrContainerStartFailed)
		return
	}
	if err := systemd.EnableAndReloadUnit(systemd.Connection(), unitName, unitPath); err != nil {
		if systemd.IsNoSuchUnit(err) || systemd.IsFileNotFound(err) {
			resp.Failure(ErrContainerNotFound)
			return
		}
		log.Printf("alter_container_state: Could not enable container %s: %v", unitName, err)
		resp.Failure(ErrContainerStartFailed)
		return
	}
	if err := systemd.Connection().StartUnitJob(unitName, "replace"); err != nil {
		log.Printf("alter_container_state: Could not start container %s: %v", unitName, err)
		resp.Failure(ErrContainerStartFailed)
		return
	}
	w := resp.SuccessWithWrite(jobs.ResponseAccepted, true, false)
	fmt.Fprintf(w, "Container %s starting\n", j.Id)
}
Example 4: unitsMatching
func unitsMatching(re *regexp.Regexp, found func(name string, unit *dbus.UnitStatus)) error {
	all, err := systemd.Connection().ListUnits()
	if err != nil {
		return err
	}
	for _, unit := range all {
		if matched := re.MatchString(unit.Name); matched {
			name := re.FindStringSubmatch(unit.Name)[1]
			found(name, &unit)
		}
	}
	return nil
}
Example 5: initializeRouter
func initializeRouter() error {
	if err := systemd.InitializeSystemdFile(systemd.UnitType, hostServiceName, rjobs.UnitRouterTemplate, nil, false); err != nil {
		return err
	}
	systemd.IsUnitProperty(systemd.Connection(), hostServiceName+".service", func(p map[string]interface{}) bool {
		switch p["ActiveState"] {
		case "active":
			break
		case "activating":
			log.Printf("The Router host service '" + hostServiceName + "' is starting - routing tasks will not be available until it completes")
		default:
			log.Printf("The Router host service '" + hostServiceName + "' is not started - router operations will not be available")
		}
		return true
	})
	return nil
}
Example 6: idleContainer
func (idler *Idler) idleContainer(id containers.Identifier) bool {
	portPairs, err := containers.GetExistingPorts(id)
	if err != nil {
		fmt.Printf("idler.idleContainer: Error retrieving ports for container: %v\n", id)
		return false
	}
	iptablePorts, err := iptables.GetIdlerRules(id, false)
	if err != nil {
		fmt.Printf("idler.idleContainer: Error retrieving ports from iptables: %v\n", id)
		return false
	}
	shouldRecreateRules := false
	for _, portPair := range portPairs {
		extPort := strconv.Itoa(int(portPair.External))
		shouldRecreateRules = shouldRecreateRules || !iptablePorts[extPort]
	}
	if !shouldRecreateRules {
		return false
	}
	//TODO: Ask geard to idle container
	f, err := os.Create(id.IdleUnitPathFor())
	if err != nil {
		fmt.Printf("idler.idleContainer: Could not create idle marker for %s: %v", id.UnitNameFor(), err)
		return false
	}
	f.Close()
	if err := systemd.Connection().StopUnitJob(id.UnitNameFor(), "fail"); err != nil {
		fmt.Printf("idler.idleContainer: Could not stop container %s: %v", id.UnitNameFor(), err)
		return false
	}
	iptables.IdleContainer(id, idler.hostIp)
	return true
}
Example 7: unidleContainer
func (idler *Idler) unidleContainer(id containers.Identifier, p netfilter.NFPacket) {
	newChanId, wasAlreadyAssigned := idler.getAvailableWaiter(id)
	if newChanId == 0 {
		fmt.Println("unidle: Error while finding wait channel")
		return
	}
	if !wasAlreadyAssigned {
		//TODO: Ask geard to unidle container
		if err := os.Remove(id.IdleUnitPathFor()); err != nil {
			fmt.Printf("unidle: Could not remove idle marker for %s: %v", id.UnitNameFor(), err)
			p.SetVerdict(netfilter.NF_ACCEPT)
			return
		}
		if err := systemd.Connection().StartUnitJob(id.UnitNameFor(), "fail"); err != nil {
			fmt.Printf("unidle: Could not start container %s: %v", id.UnitNameFor(), err)
			p.SetVerdict(netfilter.NF_ACCEPT)
			return
		}
	}
	p.SetRequeueVerdict(newChanId)
}
Example 8: inStateOrTooSoon
func inStateOrTooSoon(id containers.Identifier, unit string, active, transition bool, rateLimit uint64) (inState bool, tooSoon bool) {
	if props, erru := systemd.Connection().GetUnitProperties(unit); erru == nil {
		switch props["ActiveState"] {
		case "active":
			if active {
				inState = true
				return
			}
		case "activating":
			if active {
				inState = true
				return
			} else if transition {
				tooSoon = true
				return
			}
		case "inactive", "failed":
			if !active {
				inState = true
				return
			}
		case "deactivating":
			if !active {
				inState = true
				return
			} else if transition {
				tooSoon = true
				return
			}
		}
		if arr, ok := props["Job"].([]interface{}); ok {
			if i, ok := arr[0].(int); ok {
				if i != 0 {
					log.Printf("alter_container_state: There is an enqueued job against unit %s: %d", unit, i)
					inState = true
					return
				}
			}
		}
		/*now := time.Now().UnixNano() / 1000
		if act, ok := props["ActiveEnterTimestamp"]; ok {
			if inact, ok := props["InactiveEnterTimestamp"]; ok {
				t1 := act.(uint64)
				t2 := inact.(uint64)
				if transition {
					// if active is newest, ignore rate limit
					if t1 > t2 {
						return
					}
				} else if !active {
					t1, t2 = t2, t1
				}
				if t2 > t1 {
					diff := uint64(now) - t2
					if diff < rateLimit {
						tooSoon = true
						return
					}
				}
			}
		}*/
	}
	return
}
Example 9: Execute
func (j *DeleteContainerRequest) Execute(resp JobResponse) {
	unitName := j.Id.UnitNameFor()
	unitPath := j.Id.UnitPathFor()
	unitDefinitionsPath := j.Id.VersionedUnitsPathFor()
	idleFlagPath := j.Id.IdleUnitPathFor()
	socketUnitPath := j.Id.SocketUnitPathFor()
	homeDirPath := j.Id.BaseHomePath()
	networkLinksPath := j.Id.NetworkLinksPathFor()
	_, err := systemd.Connection().GetUnitProperties(unitName)
	switch {
	case systemd.IsNoSuchUnit(err):
		resp.Success(JobResponseOk)
		return
	case err != nil:
		resp.Failure(ErrDeleteContainerFailed)
		return
	}
	if err := systemd.Connection().StopUnitJob(unitName, "fail"); err != nil {
		log.Printf("delete_container: Unable to queue stop unit job: %v", err)
	}
	ports, err := containers.GetExistingPorts(j.Id)
	if err != nil {
		if !os.IsNotExist(err) {
			log.Printf("delete_container: Unable to read existing port definitions: %v", err)
		}
		ports = port.PortPairs{}
	}
	if err := port.ReleaseExternalPorts(ports); err != nil {
		log.Printf("delete_container: Unable to release ports: %v", err)
	}
	if err := os.Remove(unitPath); err != nil && !os.IsNotExist(err) {
		resp.Failure(ErrDeleteContainerFailed)
		return
	}
	if err := os.Remove(idleFlagPath); err != nil && !os.IsNotExist(err) {
		resp.Failure(ErrDeleteContainerFailed)
		return
	}
	if err := j.Id.SetUnitStartOnBoot(false); err != nil {
		log.Printf("delete_container: Unable to clear unit boot state: %v", err)
	}
	if err := os.Remove(socketUnitPath); err != nil && !os.IsNotExist(err) {
		log.Printf("delete_container: Unable to remove socket unit path: %v", err)
	}
	if err := os.Remove(networkLinksPath); err != nil && !os.IsNotExist(err) {
		log.Printf("delete_container: Unable to remove network links file: %v", err)
	}
	if err := os.RemoveAll(unitDefinitionsPath); err != nil {
		log.Printf("delete_container: Unable to remove definitions for container: %v", err)
	}
	if err := os.RemoveAll(filepath.Dir(homeDirPath)); err != nil {
		log.Printf("delete_container: Unable to remove home directory: %v", err)
	}
	if _, err := systemd.Connection().DisableUnitFiles([]string{unitPath, socketUnitPath}, false); err != nil {
		log.Printf("delete_container: Some units have not been disabled: %v", err)
	}
	if err := systemd.Connection().Reload(); err != nil {
		log.Printf("delete_container: systemd reload failed: %v", err)
	}
	resp.Success(JobResponseOk)
}
Example 10: Execute
//......... (the beginning of this example is omitted) .........
		Image:                req.Image,
		PortSpec:             portSpec,
		Slice:                slice + ".slice",
		Isolate:              req.Isolate,
		ReqId:                req.RequestIdentifier.String(),
		HomeDir:              id.HomePath(),
		RunDir:               id.RunPathFor(),
		EnvironmentPath:      environmentPath,
		ExecutablePath:       filepath.Join("/", "usr", "bin", "gear"),
		IncludePath:          "",
		PortPairs:            reserved,
		SocketUnitName:       socketUnitName,
		SocketActivationType: socketActivationType,
		DockerFeatures:       config.SystemDockerFeatures,
	}
	var templateName string
	switch {
	case req.SocketActivation:
		templateName = "SOCKETACTIVATED"
	case config.SystemDockerFeatures.ForegroundRun:
		templateName = "FOREGROUND"
	default:
		templateName = "SIMPLE"
	}
	if erre := csystemd.ContainerUnitTemplate.ExecuteTemplate(unit, templateName, args); erre != nil {
		log.Printf("install_container: Unable to output template: %+v", erre)
		resp.Failure(ErrContainerCreateFailed)
		defer os.Remove(unitVersionPath)
		return
	}
	if err := unit.Close(); err != nil {
		log.Printf("install_container: Unable to finish writing unit: %+v", err)
		resp.Failure(ErrContainerCreateFailed)
		defer os.Remove(unitVersionPath)
		return
	}
	// swap the new definition with the old one
	if err := utils.AtomicReplaceLink(unitVersionPath, unitPath); err != nil {
		log.Printf("install_container: Failed to activate new unit: %+v", err)
		resp.Failure(ErrContainerCreateFailed)
		return
	}
	state.Close()
	// write whether this container should be started on next boot
	if req.Started {
		if errs := csystemd.SetUnitStartOnBoot(id, true); errs != nil {
			log.Print("install_container: Unable to write container boot link: ", errs)
			resp.Failure(ErrContainerCreateFailed)
			return
		}
	}
	// Generate the socket file and ignore failures
	paths := []string{unitPath}
	if req.SocketActivation {
		if err := writeSocketUnit(socketUnitPath, &args); err == nil {
			paths = []string{unitPath, socketUnitPath}
		}
	}
	if err := systemd.EnableAndReloadUnit(systemd.Connection(), unitName, paths...); err != nil {
		log.Printf("install_container: Could not enable container %s (%v): %v", unitName, paths, err)
		resp.Failure(ErrContainerCreateFailed)
		return
	}
	if req.Started {
		if req.SocketActivation {
			// Start the socket file, not the service and ignore failures
			if err := systemd.Connection().StartUnitJob(socketUnitName, "replace"); err != nil {
				log.Printf("install_container: Could not start container socket %s: %v", socketUnitName, err)
				resp.Failure(ErrContainerCreateFailed)
				return
			}
		} else {
			if err := systemd.Connection().StartUnitJob(unitName, "replace"); err != nil {
				log.Printf("install_container: Could not start container %s: %v", unitName, err)
				resp.Failure(ErrContainerCreateFailed)
				return
			}
		}
	}
	w := resp.SuccessWithWrite(jobs.ResponseAccepted, true, false)
	if req.Started {
		fmt.Fprintf(w, "Container %s is starting\n", id)
	} else {
		fmt.Fprintf(w, "Container %s is installed\n", id)
	}
}
Example 11: Execute
func (j *BuildImageRequest) Execute(resp jobs.Response) {
	w := resp.SuccessWithWrite(jobs.ResponseAccepted, true, false)
	fmt.Fprintf(w, "Processing build-image request:\n")
	// TODO: download source, add bind-mount
	unitName := containers.JobIdentifier(j.Name).UnitNameForBuild()
	unitDescription := fmt.Sprintf("Builder for %s", j.Tag)
	stdout, err := systemd.ProcessLogsForUnit(unitName)
	if err != nil {
		stdout = utils.EmptyReader
		log.Printf("job_build_image: Unable to fetch build logs: %s, %+v", err.Error(), err)
	}
	defer stdout.Close()
	conn, errc := systemd.NewConnection()
	if errc != nil {
		log.Print("job_build_image:", errc)
		fmt.Fprintf(w, "Unable to watch start status: %v\n", errc)
		return
	}
	if err := conn.Subscribe(); err != nil {
		log.Print("job_build_image:", err)
		fmt.Fprintf(w, "Unable to watch start status: %v\n", err)
		return
	}
	defer conn.Unsubscribe()
	// make subscription global for efficiency
	var (
		changes <-chan map[string]*dbus.UnitStatus
		errch   <-chan error
	)
	if resp.StreamResult() {
		changes, errch = conn.SubscribeUnitsCustom(1*time.Second, 2,
			func(s1 *dbus.UnitStatus, s2 *dbus.UnitStatus) bool {
				return true
			},
			func(unit string) bool {
				return unit != unitName
			})
	}
	fmt.Fprintf(w, "Running sti build unit: %s\n", unitName)
	log.Printf("build_image: Running build %s", unitName)
	var startCmd []string
	if _, err := os.Stat(gearBinaryPath); err != nil {
		log.Println("gear executable is not installed on system; using sti builder image")
		startCmd = []string{
			"/usr/bin/docker", "run",
			"-rm",
			"-v", "/run/docker.sock:/run/docker.sock",
			"-t", buildImage,
			"sti", "build", j.Source, j.BaseImage, j.Tag,
			"-U", "unix:///run/docker.sock",
		}
	} else {
		startCmd = []string{
			gearBinaryPath, "build", j.Source, j.BaseImage, j.Tag,
		}
	}
	if j.RuntimeImage != "" {
		startCmd = append(startCmd, "--runtime-image")
		startCmd = append(startCmd, j.RuntimeImage)
	}
	if j.Clean {
		startCmd = append(startCmd, "--clean")
	}
	if j.Verbose {
		startCmd = append(startCmd, "--debug")
	}
	if j.CallbackUrl != "" {
		startCmd = append(startCmd, "--callbackUrl="+j.CallbackUrl)
	}
	log.Printf("build_image: Will execute %v", startCmd)
	status, err := systemd.Connection().StartTransientUnit(
		unitName,
		"fail",
		dbus.PropExecStart(startCmd, true),
		dbus.PropDescription(unitDescription),
		dbus.PropRemainAfterExit(true),
		dbus.PropSlice("container-small.slice"),
	)
	if err != nil {
		errType := reflect.TypeOf(err)
		fmt.Fprintf(w, "Unable to start build container for this image due to (%s): %s\n", errType, err.Error())
		return
	} else if status != "done" {
		fmt.Fprintf(w, "Build did not complete successfully: %s\n", status)
	} else {
		//......... (the remainder of this example is omitted) .........
Example 12: Execute
func (j *runContainer) Execute(resp jobs.Response) {
	command := j.UnitCommand()
	unitName := containers.JobIdentifier(j.Name).UnitNameFor()
	unitDescription := fmt.Sprintf("Execute image '%s': %s %s", j.Image, j.Command, strings.Join(command, " "))
	var (
		stdout  io.ReadCloser
		changes <-chan map[string]*dbus.UnitStatus
		errch   <-chan error
	)
	if resp.StreamResult() {
		r, err := systemd.ProcessLogsForUnit(unitName)
		if err != nil {
			r = utils.EmptyReader
			log.Printf("run_container: Unable to fetch container run logs: %s, %+v", err.Error(), err)
		}
		defer r.Close()
		conn, errc := systemd.NewConnection()
		if errc != nil {
			log.Print("run_container:", errc)
			return
		}
		if err := conn.Subscribe(); err != nil {
			log.Print("run_container:", err)
			return
		}
		defer conn.Unsubscribe()
		// make subscription global for efficiency
		c, ech := conn.SubscribeUnitsCustom(1*time.Second, 2,
			func(s1 *dbus.UnitStatus, s2 *dbus.UnitStatus) bool {
				return true
			},
			func(unit string) bool {
				return unit != unitName
			})
		stdout = r
		changes = c
		errch = ech
	}
	log.Printf("run_container: Running container %s", unitName)
	status, err := systemd.Connection().StartTransientUnit(
		unitName,
		"fail",
		dbus.PropExecStart(command, true),
		dbus.PropDescription(unitDescription),
		dbus.PropRemainAfterExit(true),
		dbus.PropSlice("container.slice"),
	)
	switch {
	case err != nil:
		errType := reflect.TypeOf(err)
		resp.Failure(jobs.SimpleError{jobs.ResponseError, fmt.Sprintf("Unable to start container execution due to (%s): %s", errType, err.Error())})
		return
	case status != "done":
		resp.Failure(jobs.SimpleError{jobs.ResponseError, fmt.Sprintf("Start did not complete successfully: %s", status)})
		return
	case stdout == nil:
		resp.Success(jobs.ResponseOk)
		return
	}
	w := resp.SuccessWithWrite(jobs.ResponseAccepted, true, false)
	go io.Copy(w, stdout)
wait:
	for {
		select {
		case c := <-changes:
			if changed, ok := c[unitName]; ok {
				if changed.SubState != "running" {
					break wait
				}
			}
		case err := <-errch:
			fmt.Fprintf(w, "Error %+v\n", err)
		case <-time.After(1 * time.Minute):
			log.Print("run_container:", "timeout")
			break wait
		}
	}
	stdout.Close()
}