本文整理汇总了Golang中github.com/juju/juju/container.NewDirectory函数的典型用法代码示例。如果您正苦于以下问题:Golang NewDirectory函数的具体用法?Golang NewDirectory怎么用?Golang NewDirectory使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewDirectory函数的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: TestRemoveContainerDir
// TestRemoveContainerDir verifies that RemoveDirectory relocates a
// container directory (created via NewDirectory) into the suite's
// removedDir rather than deleting it outright.
func (s *DirectorySuite) TestRemoveContainerDir(c *gc.C) {
	created, err := container.NewDirectory("testing")
	c.Assert(err, jc.ErrorIsNil)

	err = container.RemoveDirectory("testing")
	c.Assert(err, jc.ErrorIsNil)

	// The original path is gone; the directory now lives under removedDir.
	c.Assert(created, jc.DoesNotExist)
	c.Assert(filepath.Join(s.removedDir, "testing"), jc.IsDirectory)
}
示例2: CreateContainer
// CreateContainer creates and starts a new KVM container for the given
// machine config, returning the running instance together with its parsed
// hardware characteristics.
func (manager *containerManager) CreateContainer(
	machineConfig *cloudinit.MachineConfig,
	series string,
	network *container.NetworkConfig,
) (instance.Instance, *instance.HardwareCharacteristics, error) {
	// Derive the container name from the machine tag, prefixed with the
	// manager's own name when one is configured.
	name := names.NewMachineTag(machineConfig.MachineId).String()
	if manager.name != "" {
		name = fmt.Sprintf("%s-%s", manager.name, name)
	}

	// KvmObjectFactory only hands back an in-memory container object;
	// nothing exists on disk until Start is invoked below.
	kvmContainer := KvmObjectFactory.New(name)

	// Prepare the cloud-init user data in a fresh container directory.
	cloudInitDir, err := container.NewDirectory(name)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to create container directory: %v", err)
	}
	logger.Tracef("write cloud-init")
	userDataPath, err := container.WriteUserData(machineConfig, cloudInitDir)
	if err != nil {
		return nil, nil, errors.LoggedErrorf(logger, "failed to write user data: %v", err)
	}

	// startParams is assigned (not declared) here: it is defined elsewhere
	// in the package — presumably so tests can inspect the values used;
	// TODO confirm against the package declaration.
	startParams = ParseConstraintsToStartParams(machineConfig.Constraints)
	startParams.Arch = version.Current.Arch
	startParams.Series = series
	startParams.Network = network
	startParams.UserDataFile = userDataPath

	// Any stream other than the released simplestreams stream requires an
	// explicit image download URL.
	if machineConfig.ImageStream != imagemetadata.ReleasedStream {
		startParams.ImageDownloadUrl = imagemetadata.UbuntuCloudImagesURL + "/" + machineConfig.ImageStream
	}

	// A hardware parse failure is only logged; container creation proceeds.
	var hardware instance.HardwareCharacteristics
	hardware, err = instance.ParseHardware(
		fmt.Sprintf("arch=%s mem=%vM root-disk=%vG cpu-cores=%v",
			startParams.Arch, startParams.Memory, startParams.RootDisk, startParams.CpuCores))
	if err != nil {
		logger.Warningf("failed to parse hardware: %v", err)
	}

	logger.Tracef("create the container, constraints: %v", machineConfig.Constraints)
	if err := kvmContainer.Start(startParams); err != nil {
		return nil, nil, errors.LoggedErrorf(logger, "kvm container creation failed: %v", err)
	}
	logger.Tracef("kvm container created")
	return &kvmInstance{kvmContainer, name}, &hardware, nil
}
示例3: TestRemoveContainerDirWithClash
// TestRemoveContainerDirWithClash verifies that when the destination in
// removedDir is already occupied, RemoveDirectory moves the container
// directory under a distinct name ("testing.1") instead of failing.
func (s *DirectorySuite) TestRemoveContainerDirWithClash(c *gc.C) {
	created, err := container.NewDirectory("testing")
	c.Assert(err, jc.ErrorIsNil)

	// Pre-create the would-be destination to force a name clash.
	occupied := filepath.Join(s.removedDir, "testing")
	err = os.MkdirAll(occupied, 0755)
	c.Assert(err, jc.ErrorIsNil)

	err = container.RemoveDirectory("testing")
	c.Assert(err, jc.ErrorIsNil)

	c.Assert(created, jc.DoesNotExist)
	c.Assert(filepath.Join(s.removedDir, "testing.1"), jc.IsDirectory)
}
示例4: TestNewContainerDir
// TestNewContainerDir verifies that NewDirectory creates the directory on
// disk and returns its path.
func (*DirectorySuite) TestNewContainerDir(c *gc.C) {
	created, err := container.NewDirectory("testing")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(created, jc.IsDirectory)
}
示例5: EnsureCloneTemplate
// EnsureCloneTemplate makes sure a template container exists that we can
// clone from. The template is named "juju-<series>-lxc-template"; it is
// created under a template lock, provisioned with cloud-init user data,
// and reused on later calls once constructed. On any error the callback
// is invoked with a provisioning-error status.
//
// NOTE(review): this excerpt is truncated; the tail of the function body
// is not shown here.
func EnsureCloneTemplate(
backingFilesystem string,
series string,
networkConfig *container.NetworkConfig,
authorizedKeys string,
aptProxy proxy.Settings,
aptMirror string,
enablePackageUpdates bool,
enableOSUpgrades bool,
imageURLGetter container.ImageURLGetter,
useAUFS bool,
callback func(containerStatus status.Status, info string, data map[string]interface{}) error,
) (_ golxc.Container, err error) {
name := fmt.Sprintf("juju-%s-lxc-template", series)
// Report any failure through the status callback on the way out.
defer func() {
if err != nil {
callback(status.StatusProvisioningError, fmt.Sprintf("Creating container: %v", err), nil)
}
}()
containerDirectory, err := container.NewDirectory(name)
if err != nil {
return nil, err
}
// Serialize template creation/use via the template lock.
lock, err := AcquireTemplateLock(name, "ensure clone exists")
if err != nil {
return nil, err
}
defer lock.Unlock()
lxcContainer := LxcObjectFactory.New(name)
// Early exit if the container has been constructed before.
if lxcContainer.IsConstructed() {
logger.Infof("template exists, continuing")
return lxcContainer, nil
}
logger.Infof("template does not exist, creating")
callback(status.StatusAllocating, "Creating template container; downloading image may take some time", nil)
// Build the cloud-init user data that provisions the template.
userData, err := containerinit.TemplateUserData(
series,
authorizedKeys,
aptProxy,
aptMirror,
enablePackageUpdates,
enableOSUpgrades,
networkConfig,
)
if err != nil {
logger.Tracef("failed to create template user data for template: %v", err)
return nil, err
}
userDataFilename, err := containerinit.WriteCloudInitFile(containerDirectory, userData)
if err != nil {
return nil, err
}
templateParams := []string{
"--debug", // Debug errors in the cloud image
"--userdata", userDataFilename, // Our cloud-init user data
"--hostid", name, // Use the container name as the hostid
"-r", series,
}
// When an image URL getter is configured, point the template at its
// cached image and remember the CA cert needed to fetch it.
var caCert []byte
if imageURLGetter != nil {
arch := arch.HostArch()
imageURL, err := imageURLGetter.ImageURL(instance.LXC, series, arch)
if err != nil {
return nil, errors.Annotatef(err, "cannot determine cached image URL")
}
templateParams = append(templateParams, "-T", imageURL)
caCert = imageURLGetter.CACert()
}
var extraCreateArgs []string
if backingFilesystem == Btrfs {
extraCreateArgs = append(extraCreateArgs, "-B", Btrfs)
}
// Create the container.
logger.Tracef("create the template container")
err = createContainer(
lxcContainer,
containerDirectory,
networkConfig,
extraCreateArgs,
templateParams,
caCert,
)
if err != nil {
logger.Errorf("lxc template container creation failed: %v", err)
return nil, err
}
// Make sure that the mount dir has been created.
logger.Tracef("make the mount dir for the shared logs")
if err := os.MkdirAll(internalLogDir(name), 0755); err != nil {
logger.Tracef("failed to create internal /var/log/juju mount dir: %v", err)
return nil, err
//......... remainder of the function omitted in this excerpt .........
示例6: CreateContainer
// CreateContainer creates and starts a new KVM container for the given
// instance config, reporting progress through callback and returning the
// running instance together with its parsed hardware characteristics.
func (manager *containerManager) CreateContainer(
	instanceConfig *instancecfg.InstanceConfig,
	cons constraints.Value,
	series string,
	networkConfig *container.NetworkConfig,
	storageConfig *container.StorageConfig,
	callback container.StatusCallback,
) (_ instance.Instance, _ *instance.HardwareCharacteristics, err error) {
	name, err := manager.namespace.Hostname(instanceConfig.MachineId)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}

	// Report any failure through the status callback on the way out.
	defer func() {
		if err != nil {
			callback(status.ProvisioningError, fmt.Sprintf("Creating container: %v", err), nil)
		}
	}()

	// Set the MachineContainerHostname to match the name returned by virsh list
	instanceConfig.MachineContainerHostname = name

	// KvmObjectFactory only hands back an in-memory container object;
	// nothing exists on disk until Start is invoked below.
	kvmContainer := KvmObjectFactory.New(name)

	// Prepare the cloud-init user data in a fresh container directory.
	cloudInitDir, err := container.NewDirectory(name)
	if err != nil {
		return nil, nil, errors.Annotate(err, "failed to create container directory")
	}
	logger.Tracef("write cloud-init")
	userDataPath, err := containerinit.WriteUserData(instanceConfig, networkConfig, cloudInitDir)
	if err != nil {
		logger.Infof("machine config api %#v", *instanceConfig.APIInfo)
		err = errors.Annotate(err, "failed to write user data")
		logger.Infof(err.Error())
		return nil, nil, err
	}

	// startParams is assigned (not declared) here: it is defined elsewhere
	// in the package — presumably so tests can inspect the values used;
	// TODO confirm against the package declaration.
	startParams = ParseConstraintsToStartParams(cons)
	startParams.Arch = arch.HostArch()
	startParams.Series = series
	startParams.Network = networkConfig
	startParams.UserDataFile = userDataPath

	// Any stream other than the released simplestreams stream requires an
	// explicit image download URL.
	if instanceConfig.ImageStream != imagemetadata.ReleasedStream {
		startParams.ImageDownloadUrl = imagemetadata.UbuntuCloudImagesURL + "/" + instanceConfig.ImageStream
	}

	var hardware instance.HardwareCharacteristics
	hardware, err = instance.ParseHardware(
		fmt.Sprintf("arch=%s mem=%vM root-disk=%vG cores=%v",
			startParams.Arch, startParams.Memory, startParams.RootDisk, startParams.CpuCores))
	if err != nil {
		return nil, nil, errors.Annotate(err, "failed to parse hardware")
	}

	callback(status.Allocating, "Creating container; it might take some time", nil)
	logger.Tracef("create the container, constraints: %v", cons)
	if err := kvmContainer.Start(startParams); err != nil {
		err = errors.Annotate(err, "kvm container creation failed")
		logger.Infof(err.Error())
		return nil, nil, err
	}
	logger.Tracef("kvm container created")
	return &kvmInstance{kvmContainer, name}, &hardware, nil
}
示例7: EnsureCloneTemplate
// EnsureCloneTemplate makes sure a template container exists that we can
// clone from. The template "juju-<series>-template" is created under a
// template lock, provisioned via cloud-init, started, and its console
// output is tailed while waiting for it to stop.
//
// NOTE(review): this excerpt is truncated; the tail of the function body
// is not shown here.
func EnsureCloneTemplate(
backingFilesystem string,
series string,
network *container.NetworkConfig,
authorizedKeys string,
aptProxy proxy.Settings,
) (golxc.Container, error) {
name := fmt.Sprintf("juju-%s-template", series)
containerDirectory, err := container.NewDirectory(name)
if err != nil {
return nil, err
}
// Serialize template creation/use via the template lock.
lock, err := AcquireTemplateLock(name, "ensure clone exists")
if err != nil {
return nil, err
}
defer lock.Unlock()
lxcContainer := LxcObjectFactory.New(name)
// Early exit if the container has been constructed before.
if lxcContainer.IsConstructed() {
logger.Infof("template exists, continuing")
return lxcContainer, nil
}
logger.Infof("template does not exist, creating")
// Build the cloud-init user data that provisions the template.
userData, err := templateUserData(series, authorizedKeys, aptProxy)
if err != nil {
logger.Tracef("failed to create template user data for template: %v", err)
return nil, err
}
userDataFilename, err := container.WriteCloudInitFile(containerDirectory, userData)
if err != nil {
return nil, err
}
configFile, err := writeLxcConfig(network, containerDirectory)
if err != nil {
logger.Errorf("failed to write config file: %v", err)
return nil, err
}
templateParams := []string{
"--debug", // Debug errors in the cloud image
"--userdata", userDataFilename, // Our cloud-init user data
"--hostid", name, // Use the container name as the hostid
"-r", series,
}
var extraCreateArgs []string
if backingFilesystem == Btrfs {
extraCreateArgs = append(extraCreateArgs, "-B", Btrfs)
}
// Create the container.
logger.Tracef("create the container")
if err := lxcContainer.Create(configFile, defaultTemplate, extraCreateArgs, templateParams); err != nil {
logger.Errorf("lxc container creation failed: %v", err)
return nil, err
}
// Make sure that the mount dir has been created.
logger.Tracef("make the mount dir for the shared logs")
if err := os.MkdirAll(internalLogDir(name), 0755); err != nil {
logger.Tracef("failed to create internal /var/log/juju mount dir: %v", err)
return nil, err
}
// Start the lxc container with the appropriate settings for grabbing the
// console output and a log file.
consoleFile := filepath.Join(containerDirectory, "console.log")
lxcContainer.SetLogFile(filepath.Join(containerDirectory, "container.log"), golxc.LogDebug)
logger.Tracef("start the container")
// We explicitly don't pass through the config file to the container.Start
// method as we have passed it through at container creation time. This
// is necessary to get the appropriate rootfs reference without explicitly
// setting it ourselves.
if err = lxcContainer.Start("", consoleFile); err != nil {
logger.Errorf("container failed to start: %v", err)
return nil, err
}
logger.Infof("template container started, now wait for it to stop")
// Perhaps we should wait for it to finish, and the question becomes "how
// long do we wait for it to complete?"
console, err := os.Open(consoleFile)
if err != nil {
// can't open the console log for tailing
return nil, err
}
// Tail the console so we can tell when the template goes quiet.
tailWriter := &logTail{tick: time.Now()}
consoleTailer := tailer.NewTailer(console, tailWriter, nil)
defer consoleTailer.Stop()
// Poll while the container runs; if no console output is seen for
// TemplateStopTimeout, assume something went wrong and give up.
for lxcContainer.IsRunning() {
if tailWriter.lastTick().Before(time.Now().Add(-TemplateStopTimeout)) {
logger.Infof("not heard anything from the template log for five minutes")
return nil, fmt.Errorf("template container %q did not stop", name)
//......... remainder of the function omitted in this excerpt .........
示例8: CreateContainer
// CreateContainer creates or clones an LXC container for the given
// instance config. When the manager is configured with createWithClone,
// the per-series template container is ensured and then cloned (as a
// snapshot on btrfs, or via AUFS); otherwise a container is created from
// scratch.
//
// NOTE(review): this excerpt is truncated; the tail of the function body
// is not shown here.
func (manager *containerManager) CreateContainer(
instanceConfig *instancecfg.InstanceConfig,
series string,
networkConfig *container.NetworkConfig,
storageConfig *container.StorageConfig,
) (inst instance.Instance, _ *instance.HardwareCharacteristics, err error) {
// Check our preconditions; these are programmer errors, hence panic.
if manager == nil {
panic("manager is nil")
} else if series == "" {
panic("series not set")
} else if networkConfig == nil {
panic("networkConfig is nil")
} else if storageConfig == nil {
panic("storageConfig is nil")
}
// Log how long the start took (only on success).
defer func(start time.Time) {
if err == nil {
logger.Tracef("container %q started: %v", inst.Id(), time.Now().Sub(start))
}
}(time.Now())
name := names.NewMachineTag(instanceConfig.MachineId).String()
if manager.name != "" {
name = fmt.Sprintf("%s-%s", manager.name, name)
}
// Create the cloud-init.
directory, err := container.NewDirectory(name)
if err != nil {
return nil, nil, errors.Annotate(err, "failed to create a directory for the container")
}
logger.Tracef("write cloud-init")
userDataFilename, err := containerinit.WriteUserData(instanceConfig, networkConfig, directory)
if err != nil {
return nil, nil, errors.Annotate(err, "failed to write user data")
}
var lxcContainer golxc.Container
if manager.createWithClone {
// Clone path: make sure the series template container exists first.
templateContainer, err := EnsureCloneTemplate(
manager.backingFilesystem,
series,
networkConfig,
instanceConfig.AuthorizedKeys,
instanceConfig.AptProxySettings,
instanceConfig.AptMirror,
instanceConfig.EnableOSRefreshUpdate,
instanceConfig.EnableOSUpgrade,
manager.imageURLGetter,
manager.useAUFS,
)
if err != nil {
return nil, nil, errors.Annotate(err, "failed to retrieve the template to clone")
}
templateParams := []string{
"--debug", // Debug errors in the cloud image
"--userdata", userDataFilename, // Our cloud-init user data
"--hostid", name, // Use the container name as the hostid
}
// Snapshot clones need btrfs or AUFS; non-btrfs AUFS also needs an
// explicit backing store argument.
var extraCloneArgs []string
if manager.backingFilesystem == Btrfs || manager.useAUFS {
extraCloneArgs = append(extraCloneArgs, "--snapshot")
}
if manager.backingFilesystem != Btrfs && manager.useAUFS {
extraCloneArgs = append(extraCloneArgs, "--backingstore", "aufs")
}
lock, err := AcquireTemplateLock(templateContainer.Name(), "clone")
if err != nil {
return nil, nil, errors.Annotate(err, "failed to acquire lock on template")
}
defer lock.Unlock()
// Ensure the run-time effective config of the template
// container has correctly ordered network settings, otherwise
// Clone() below will fail. This is needed in case we haven't
// created a new template now but are reusing an existing one.
// See LP bug #1414016.
configPath := containerConfigFilename(templateContainer.Name())
if _, err := reorderNetworkConfig(configPath); err != nil {
return nil, nil, errors.Annotate(err, "failed to reorder network settings")
}
lxcContainer, err = templateContainer.Clone(name, extraCloneArgs, templateParams)
if err != nil {
return nil, nil, errors.Annotate(err, "lxc container cloning failed")
}
} else {
// Note here that the LxcObjectFactory only returns a valid container
// object, and doesn't actually construct the underlying lxc container on
// disk.
lxcContainer = LxcObjectFactory.New(name)
templateParams := []string{
"--debug", // Debug errors in the cloud image
"--userdata", userDataFilename, // Our cloud-init user data
"--hostid", name, // Use the container name as the hostid
//......... remainder of the function omitted in this excerpt .........