本文整理汇总了Golang中github.com/lxc/lxd/shared.AddSlash函数的典型用法代码示例。如果您正苦于以下问题:Golang AddSlash函数的具体用法?Golang AddSlash怎么用?Golang AddSlash使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了AddSlash函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: rsyncMigrationSink
// rsyncMigrationSink receives a container's filesystem (and, for live
// migrations, a final delta pass) over the websocket via rsync.
func rsyncMigrationSink(live bool, container container, snapshots []container, conn *websocket.Conn) error {
	// The container itself is always the first object on the wire.
	if err := RsyncRecv(shared.AddSlash(container.Path()), conn); err != nil {
		return err
	}

	// Make sure the per-container snapshot directory exists before any
	// snapshot data arrives.
	if len(snapshots) > 0 {
		err := os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", container.Name())), 0700)
		if err != nil {
			return err
		}
	}

	for _, snap := range snapshots {
		if err := RsyncRecv(shared.AddSlash(snap.Path()), conn); err != nil {
			return err
		}
	}

	if !live {
		return nil
	}

	// Live migration: one more pass for whatever changed after the
	// initial sync.
	return RsyncRecv(shared.AddSlash(container.Path()), conn)
}
示例2: rsyncMigrationSink
// rsyncMigrationSink receives the container's filesystem followed by each
// of its snapshots over the websocket via rsync.
func rsyncMigrationSink(container container, snapshots []container, conn *websocket.Conn) error {
	recv := func(path string) error {
		// The trailing slash makes rsync transfer directory contents.
		return RsyncRecv(shared.AddSlash(path), conn)
	}

	// The container itself is always the first object on the wire.
	if err := recv(container.Path()); err != nil {
		return err
	}

	for _, snap := range snapshots {
		if err := recv(snap.Path()); err != nil {
			return err
		}
	}

	return nil
}
示例3: SendWhileRunning
// SendWhileRunning streams every snapshot and then the container itself
// over the websocket via rsync while the container keeps running.
func (s rsyncStorageSourceDriver) SendWhileRunning(conn *websocket.Conn) error {
	for _, snap := range s.snapshots {
		if err := snap.StorageStart(); err != nil {
			return err
		}
		// NOTE: these defers only fire when SendWhileRunning returns, so
		// every snapshot's storage stays started until the whole send is done.
		defer snap.StorageStop()

		if err := RsyncSend(shared.AddSlash(snap.Path()), conn); err != nil {
			return err
		}
	}

	// The container's own filesystem goes last.
	return RsyncSend(shared.AddSlash(s.container.Path()), conn)
}
示例4: SendWhileRunning
// SendWhileRunning streams every snapshot (with per-object progress
// reporting attached to op) and then the container itself via rsync.
func (s rsyncStorageSourceDriver) SendWhileRunning(conn *websocket.Conn, op *operation) error {
	for _, snap := range s.snapshots {
		if err := snap.StorageStart(); err != nil {
			return err
		}
		// NOTE: deferred stops all fire when this function returns, not per
		// iteration, so each snapshot stays started until the send finishes.
		defer snap.StorageStop()

		tracker := StorageProgressReader(op, "fs_progress", snap.Name())
		if err := RsyncSend(shared.AddSlash(snap.Path()), conn, tracker); err != nil {
			return err
		}
	}

	// Finally send the container's own filesystem with its own tracker.
	tracker := StorageProgressReader(op, "fs_progress", s.container.Name())
	return RsyncSend(shared.AddSlash(s.container.Path()), conn, tracker)
}
示例5: SendWhileRunning
// SendWhileRunning rsyncs the container followed by all of its snapshots
// over the websocket while the container keeps running.
func (s rsyncStorageSourceDriver) SendWhileRunning(conn *websocket.Conn) error {
	sources := make([]container, 0, len(s.snapshots)+1)
	sources = append(sources, s.container)
	sources = append(sources, s.snapshots...)

	for _, src := range sources {
		if err := RsyncSend(shared.AddSlash(src.Path()), conn); err != nil {
			return err
		}
	}

	return nil
}
示例6: rsyncCopy
// rsyncCopy copies a directory using rsync (with the --devices option).
// It returns rsync's combined stdout/stderr output alongside any error.
func (ss *storageShared) rsyncCopy(source string, dest string) (string, error) {
	// The destination must exist before rsync can populate it.
	if err := os.MkdirAll(dest, 0755); err != nil {
		return "", err
	}

	// AddSlash makes rsync copy the source directory's contents rather
	// than the directory itself.
	cmd := exec.Command("rsync", "-a", "--devices", shared.AddSlash(source), dest)
	output, err := cmd.CombinedOutput()
	return string(output), err
}
示例7: storageRsyncCopy
// storageRsyncCopy copies a directory using rsync (with the --devices option).
// It returns rsync's combined stdout/stderr output alongside any error.
func storageRsyncCopy(source string, dest string) (string, error) {
	// The destination must exist before rsync can populate it.
	if err := os.MkdirAll(dest, 0755); err != nil {
		return "", err
	}

	// Quiet by default; itemize changes when the daemon runs in debug mode.
	verbosity := "-q"
	if *debug {
		verbosity = "-vi"
	}

	// AddSlash makes rsync copy the source directory's contents rather
	// than the directory itself.
	cmd := exec.Command(
		"rsync",
		"-a",
		"-HAX",
		"--devices",
		"--delete",
		verbosity,
		shared.AddSlash(source),
		dest,
	)
	output, err := cmd.CombinedOutput()
	return string(output), err
}
示例8: Do
//.........这里部分代码省略.........
}
myType := s.container.Storage().MigrationType()
header := MigrationHeader{
Fs: &myType,
Criu: criuType,
Idmap: idmaps,
Snapshots: snapshots,
}
if err := s.send(&header); err != nil {
s.sendControl(err)
return err
}
if fsErr != nil {
s.sendControl(fsErr)
return fsErr
}
if err := s.recv(&header); err != nil {
s.sendControl(err)
return err
}
// TODO: actually fall back on rsync.
if *header.Fs != myType {
err := fmt.Errorf("mismatched storage types not supported yet")
s.sendControl(err)
return err
}
if s.live {
if header.Criu == nil {
err := fmt.Errorf("Got no CRIU socket type for live migration")
s.sendControl(err)
return err
} else if *header.Criu != CRIUType_CRIU_RSYNC {
err := fmt.Errorf("Formats other than criu rsync not understood")
s.sendControl(err)
return err
}
checkpointDir, err := ioutil.TempDir("", "lxd_checkpoint_")
if err != nil {
s.sendControl(err)
return err
}
defer os.RemoveAll(checkpointDir)
opts := lxc.CheckpointOptions{Stop: true, Directory: checkpointDir, Verbose: true}
err = s.container.Checkpoint(opts)
if err2 := CollectCRIULogFile(s.container, checkpointDir, "migration", "dump"); err2 != nil {
shared.Debugf("Error collecting checkpoint log file %s", err)
}
if err != nil {
log := GetCRIULogErrors(checkpointDir, "dump")
err = fmt.Errorf("checkpoint failed:\n%s", log)
s.sendControl(err)
return err
}
/*
* We do the serially right now, but there's really no reason for us
* to; since we have separate websockets, we can do it in parallel if
* we wanted to. However, assuming we're network bound, there's really
* no reason to do these in parallel. In the future when we're using
* p.haul's protocol, it will make sense to do these in parallel.
*/
if err := RsyncSend(shared.AddSlash(checkpointDir), s.criuConn); err != nil {
s.sendControl(err)
return err
}
}
for _, source := range sources {
shared.Debugf("sending fs object %s", source.Name())
if err := source.Send(s.fsConn); err != nil {
s.sendControl(err)
return err
}
}
msg := MigrationControl{}
if err := s.recv(&msg); err != nil {
s.disconnect()
return err
}
// TODO: should we add some config here about automatically restarting
// the container migrate failure? What about the failures above?
if !*msg.Success {
return fmt.Errorf(*msg.Message)
}
return nil
}
示例9: Do
//.........这里部分代码省略.........
return fmt.Errorf("restore failed, failing CRIU")
}
return nil
},
nil,
func(op *operation, r *http.Request, w http.ResponseWriter) error {
secret := r.FormValue("secret")
if secret == "" {
return fmt.Errorf("missing secret")
}
if secret != actionScriptOpSecret {
return os.ErrPermission
}
c, err := shared.WebsocketUpgrader.Upgrade(w, r, nil)
if err != nil {
return err
}
dumpDone <- true
closeMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")
return c.WriteMessage(websocket.CloseMessage, closeMsg)
},
)
if err != nil {
os.RemoveAll(checkpointDir)
return abort(err)
}
if err := writeActionScript(checkpointDir, actionScriptOp.url, actionScriptOpSecret); err != nil {
os.RemoveAll(checkpointDir)
return abort(err)
}
_, err = actionScriptOp.Run()
if err != nil {
os.RemoveAll(checkpointDir)
return abort(err)
}
go func() {
dumpSuccess <- s.container.Migrate(lxc.MIGRATE_DUMP, checkpointDir, "migration", true, true)
os.RemoveAll(checkpointDir)
}()
select {
/* the checkpoint failed, let's just abort */
case err = <-dumpSuccess:
return abort(err)
/* the dump finished, let's continue on to the restore */
case <-dumpDone:
shared.LogDebugf("Dump finished, continuing with restore...")
}
} else {
defer os.RemoveAll(checkpointDir)
if err := s.container.Migrate(lxc.MIGRATE_DUMP, checkpointDir, "migration", true, false); err != nil {
return abort(err)
}
}
/*
* We do the serially right now, but there's really no reason for us
* to; since we have separate websockets, we can do it in parallel if
* we wanted to. However, assuming we're network bound, there's really
* no reason to do these in parallel. In the future when we're using
* p.haul's protocol, it will make sense to do these in parallel.
*/
if err := RsyncSend(shared.AddSlash(checkpointDir), s.criuConn, nil); err != nil {
return abort(err)
}
if err := driver.SendAfterCheckpoint(s.fsConn); err != nil {
return abort(err)
}
}
driver.Cleanup()
msg := MigrationControl{}
if err := s.recv(&msg); err != nil {
s.disconnect()
return err
}
if s.live {
restoreSuccess <- *msg.Success
err := <-dumpSuccess
if err != nil {
shared.LogErrorf("dump failed after successful restore?: %q", err)
}
}
if !*msg.Success {
return fmt.Errorf(*msg.Message)
}
return nil
}
示例10: SendAfterCheckpoint
// SendAfterCheckpoint resyncs anything on the container's filesystem that
// changed between the initial send and the checkpoint.
func (s rsyncStorageSourceDriver) SendAfterCheckpoint(conn *websocket.Conn) error {
	containerPath := shared.AddSlash(s.container.Path())
	return RsyncSend(containerPath, conn)
}
示例11: containerSnapRestore
func containerSnapRestore(d *Daemon, name string, snap string) error {
// normalize snapshot name
if !shared.IsSnapshot(snap) {
snap = fmt.Sprintf("%s/%s", name, snap)
}
shared.Debugf("RESTORE => Restoring snapshot [%s] on container [%s]", snap, name)
/*
* restore steps:
* 1. stop container if already running
* 2. overwrite existing config with snapshot config
* 3. copy snapshot rootfs to container
*/
wasRunning := false
c, err := newLxdContainer(name, d)
if err != nil {
shared.Debugf("RESTORE => Error: newLxdContainer() failed for container", err)
return err
}
// 1. stop container
// TODO: stateful restore ?
if c.c.Running() {
wasRunning = true
if err = c.Stop(); err != nil {
shared.Debugf("RESTORE => Error: could not stop container", err)
return err
}
shared.Debugf("RESTORE => Stopped container %s", name)
}
// 2, replace config
// Make sure the source exists.
source, err := newLxdContainer(snap, d)
if err != nil {
shared.Debugf("RESTORE => Error: newLxdContainer() failed for snapshot", err)
return err
}
newConfig := containerConfigReq{}
newConfig.Config = source.config
newConfig.Profiles = source.profiles
newConfig.Devices = source.devices
tx, err := containerReplaceConfig(d, c, name, newConfig)
if err != nil {
shared.Debugf("RESTORE => err #4", err)
return err
}
if err := txCommit(tx); err != nil {
return err
}
// 3. copy rootfs
// TODO: btrfs optimizations
containerRootPath := shared.VarPath("lxc", name)
if !shared.IsDir(path.Dir(containerRootPath)) {
shared.Debugf("RESTORE => containerRoot [%s] directory does not exist", containerRootPath)
return os.ErrNotExist
}
var snapshotRootFSPath string
snapshotRootFSPath = shared.AddSlash(snapshotRootfsDir(c, strings.SplitN(snap, "/", 2)[1]))
containerRootFSPath := shared.AddSlash(fmt.Sprintf("%s/%s", containerRootPath, "rootfs"))
shared.Debugf("RESTORE => Copying %s to %s", snapshotRootFSPath, containerRootFSPath)
rsyncVerbosity := "-q"
if *debug {
rsyncVerbosity = "-vi"
}
output, err := exec.Command("rsync", "-a", "-c", "-HAX", "--devices", "--delete", rsyncVerbosity, snapshotRootFSPath, containerRootFSPath).CombinedOutput()
shared.Debugf("RESTORE => rsync output\n%s", output)
if err == nil && !source.isPrivileged() {
err = setUnprivUserAcl(c, containerRootPath)
if err != nil {
shared.Debugf("Error adding acl for container root: falling back to chmod\n")
output, err := exec.Command("chmod", "+x", containerRootPath).CombinedOutput()
if err != nil {
shared.Debugf("Error chmoding the container root\n")
shared.Debugf(string(output))
return err
}
}
} else {
shared.Debugf("rsync failed:\n%s", output)
return err
}
if wasRunning {
c.Start()
}
return nil
//.........这里部分代码省略.........
示例12: do
func (c *migrationSink) do() error {
var err error
c.controlConn, err = c.connectWithSecret(c.controlSecret)
if err != nil {
return err
}
defer c.disconnect()
c.fsConn, err = c.connectWithSecret(c.fsSecret)
if err != nil {
c.sendControl(err)
return err
}
if c.live {
c.criuConn, err = c.connectWithSecret(c.criuSecret)
if err != nil {
c.sendControl(err)
return err
}
}
// For now, we just ignore whatever the server sends us. We only
// support RSYNC, so that's what we respond with.
header := MigrationHeader{}
if err := c.recv(&header); err != nil {
c.sendControl(err)
return err
}
criuType := CRIUType_CRIU_RSYNC.Enum()
if !c.live {
criuType = nil
}
resp := MigrationHeader{Fs: MigrationFSType_RSYNC.Enum(), Criu: criuType}
if err := c.send(&resp); err != nil {
c.sendControl(err)
return err
}
restore := make(chan error)
go func(c *migrationSink) {
imagesDir := ""
srcIdmap := new(shared.IdmapSet)
dstIdmap := c.IdmapSet
if dstIdmap == nil {
dstIdmap = new(shared.IdmapSet)
}
if c.live {
var err error
imagesDir, err = ioutil.TempDir("", "lxd_migration_")
if err != nil {
os.RemoveAll(imagesDir)
c.sendControl(err)
return
}
defer func() {
err := CollectCRIULogFile(c.container, imagesDir, "migration", "restore")
/*
* If the checkpoint fails, we won't have any log to collect,
* so don't warn about that.
*/
if err != nil && !os.IsNotExist(err) {
shared.Debugf("Error collectiong migration log file %s", err)
}
os.RemoveAll(imagesDir)
}()
if err := RsyncRecv(shared.AddSlash(imagesDir), c.criuConn); err != nil {
restore <- err
os.RemoveAll(imagesDir)
c.sendControl(err)
return
}
/*
* For unprivileged containers we need to shift the
* perms on the images images so that they can be
* opened by the process after it is in its user
* namespace.
*/
if dstIdmap != nil {
if err := dstIdmap.ShiftRootfs(imagesDir); err != nil {
restore <- err
os.RemoveAll(imagesDir)
c.sendControl(err)
return
}
}
}
fsDir := c.container.ConfigItem("lxc.rootfs")[0]
if err := RsyncRecv(shared.AddSlash(fsDir), c.fsConn); err != nil {
restore <- err
c.sendControl(err)
//.........这里部分代码省略.........
示例13: Do
func (s *migrationSourceWs) Do() shared.OperationResult {
<-s.allConnected
criuType := CRIUType_CRIU_RSYNC.Enum()
if !s.live {
criuType = nil
}
idmaps := make([]*IDMapType, 0)
if s.idmapset != nil {
for _, ctnIdmap := range s.idmapset.Idmap {
idmap := IDMapType{
Isuid: proto.Bool(ctnIdmap.Isuid),
Isgid: proto.Bool(ctnIdmap.Isgid),
Hostid: proto.Int(ctnIdmap.Hostid),
Nsid: proto.Int(ctnIdmap.Nsid),
Maprange: proto.Int(ctnIdmap.Maprange),
}
idmaps = append(idmaps, &idmap)
}
}
header := MigrationHeader{
Fs: MigrationFSType_RSYNC.Enum(),
Criu: criuType,
Idmap: idmaps,
}
if err := s.send(&header); err != nil {
s.sendControl(err)
return shared.OperationError(err)
}
if err := s.recv(&header); err != nil {
s.sendControl(err)
return shared.OperationError(err)
}
if *header.Fs != MigrationFSType_RSYNC {
err := fmt.Errorf("Formats other than rsync not understood")
s.sendControl(err)
return shared.OperationError(err)
}
if s.live {
if header.Criu == nil {
err := fmt.Errorf("Got no CRIU socket type for live migration")
s.sendControl(err)
return shared.OperationError(err)
} else if *header.Criu != CRIUType_CRIU_RSYNC {
err := fmt.Errorf("Formats other than criu rsync not understood")
s.sendControl(err)
return shared.OperationError(err)
}
checkpointDir, err := ioutil.TempDir("", "lxd_migration_")
if err != nil {
s.sendControl(err)
return shared.OperationError(err)
}
defer os.RemoveAll(checkpointDir)
opts := lxc.CheckpointOptions{Stop: true, Directory: checkpointDir, Verbose: true}
err = s.container.Checkpoint(opts)
if err2 := CollectCRIULogFile(s.container, checkpointDir, "migration", "dump"); err2 != nil {
shared.Debugf("Error collecting checkpoint log file %s", err)
}
if err != nil {
log := GetCRIULogErrors(checkpointDir, "dump")
err = fmt.Errorf("checkpoint failed:\n%s", log)
s.sendControl(err)
return shared.OperationError(err)
}
/*
* We do the serially right now, but there's really no reason for us
* to; since we have separate websockets, we can do it in parallel if
* we wanted to. However, assuming we're network bound, there's really
* no reason to do these in parallel. In the future when we're using
* p.haul's protocol, it will make sense to do these in parallel.
*/
if err := RsyncSend(shared.AddSlash(checkpointDir), s.criuConn); err != nil {
s.sendControl(err)
return shared.OperationError(err)
}
}
fsDir := s.container.ConfigItem("lxc.rootfs")[0]
if err := RsyncSend(shared.AddSlash(fsDir), s.fsConn); err != nil {
s.sendControl(err)
return shared.OperationError(err)
}
msg := MigrationControl{}
if err := s.recv(&msg); err != nil {
//.........这里部分代码省略.........
示例14: containerSnapshotsPost
// containerSnapshotsPost creates a new snapshot of a container: it picks
// (or generates) a snapshot name, records the snapshot in the database and
// copies the rootfs, optionally checkpointing a running container first
// when a stateful snapshot was requested. The heavy work runs as an async
// operation.
func containerSnapshotsPost(d *Daemon, r *http.Request) Response {
	name := mux.Vars(r)["name"]

	/*
	 * snapshot is a three step operation:
	 * 1. choose a new name
	 * 2. copy the database info over
	 * 3. copy over the rootfs
	 */
	c, err := newLxdContainer(name, d)
	if err != nil {
		return SmartError(err)
	}

	body := shared.Jmap{}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		return BadRequest(err)
	}

	snapName, err := body.GetString("name")
	if err != nil || snapName == "" {
		// No usable name supplied; generate the next "snapN".
		snapName = fmt.Sprintf("snap%d", nextSnapshot(d, name))
	}

	stateful, err := body.GetBool("stateful")
	if err != nil {
		return BadRequest(err)
	}

	fullName := fmt.Sprintf("%s/%s", name, snapName)
	snapDir := snapshotDir(c, snapName)
	if shared.PathExists(snapDir) {
		shared.Log.Error(
			"Snapshot exists on disk",
			log.Ctx{
				"name": fullName,
				"path": snapDir})
		return Conflict
	}

	if err := os.MkdirAll(snapDir, 0700); err != nil {
		return InternalError(err)
	}

	do := func() error {
		stateDir := snapshotStateDir(c, snapName)
		if err := os.MkdirAll(stateDir, 0700); err != nil {
			os.RemoveAll(snapDir)
			return err
		}

		if stateful {
			// TODO - shouldn't we freeze for the duration of rootfs snapshot below?
			if !c.c.Running() {
				os.RemoveAll(snapDir)
				return fmt.Errorf("Container not running\n")
			}

			opts := lxc.CheckpointOptions{Directory: stateDir, Stop: true, Verbose: true}
			if err := c.c.Checkpoint(opts); err != nil {
				os.RemoveAll(snapDir)
				return err
			}
		}

		/* Create the db info */
		args := containerLXDArgs{
			Ctype:        cTypeSnapshot,
			Config:       c.config,
			Profiles:     c.profiles,
			Ephemeral:    c.ephemeral,
			BaseImage:    c.config["volatile.baseImage"],
			Architecture: c.architecture,
		}
		if _, err := dbContainerCreate(d.db, fullName, args); err != nil {
			os.RemoveAll(snapDir)
			return err
		}

		/* Copy the rootfs; the trailing slash makes rsync copy the
		 * directory's contents rather than the directory itself. */
		oldPath := shared.AddSlash(shared.VarPath("containers", name, "rootfs"))
		newPath := snapshotRootfsDir(c, snapName)
		err := exec.Command("rsync", "-a", "--devices", oldPath, newPath).Run()
		if err != nil {
			// Roll back both the on-disk state and the db record.
			os.RemoveAll(snapDir)
			dbContainerSnapshotRemove(d.db, name, snapName)
		}
		return err
	}

	return AsyncResponse(shared.OperationWrap(do), nil)
}
示例15: Do
//.........这里部分代码省略.........
s.sendControl(err)
return err
}
if fsErr != nil {
s.sendControl(fsErr)
return fsErr
}
if err := s.recv(&header); err != nil {
s.sendControl(err)
return err
}
if *header.Fs != myType {
myType = MigrationFSType_RSYNC
header.Fs = &myType
driver, _ = rsyncMigrationSource(s.container)
}
defer driver.Cleanup()
if err := driver.SendWhileRunning(s.fsConn); err != nil {
s.sendControl(err)
return err
}
if s.live {
if header.Criu == nil {
err := fmt.Errorf("Got no CRIU socket type for live migration")
s.sendControl(err)
return err
} else if *header.Criu != CRIUType_CRIU_RSYNC {
err := fmt.Errorf("Formats other than criu rsync not understood")
s.sendControl(err)
return err
}
checkpointDir, err := ioutil.TempDir("", "lxd_checkpoint_")
if err != nil {
s.sendControl(err)
return err
}
defer os.RemoveAll(checkpointDir)
opts := lxc.CheckpointOptions{Stop: true, Directory: checkpointDir, Verbose: true}
err = s.container.Checkpoint(opts)
if err2 := CollectCRIULogFile(s.container, checkpointDir, "migration", "dump"); err2 != nil {
shared.Debugf("Error collecting checkpoint log file %s", err)
}
if err != nil {
log, err2 := GetCRIULogErrors(checkpointDir, "dump")
/* couldn't find the CRIU log file which means we
* didn't even get that far; give back the liblxc
* error. */
if err2 != nil {
log = err.Error()
}
err = fmt.Errorf("checkpoint failed:\n%s", log)
s.sendControl(err)
return err
}
/*
* We do the serially right now, but there's really no reason for us
* to; since we have separate websockets, we can do it in parallel if
* we wanted to. However, assuming we're network bound, there's really
* no reason to do these in parallel. In the future when we're using
* p.haul's protocol, it will make sense to do these in parallel.
*/
if err := RsyncSend(shared.AddSlash(checkpointDir), s.criuConn); err != nil {
s.sendControl(err)
return err
}
if err := driver.SendAfterCheckpoint(s.fsConn); err != nil {
s.sendControl(err)
return err
}
}
msg := MigrationControl{}
if err := s.recv(&msg); err != nil {
s.disconnect()
return err
}
// TODO: should we add some config here about automatically restarting
// the container migrate failure? What about the failures above?
if !*msg.Success {
return fmt.Errorf(*msg.Message)
}
return nil
}