This article collects typical usage examples of the NewProvider function from the Go package github.com/flynn/flynn/host/volume/zfs. If you are wondering what NewProvider does, how to call it, or what real-world usage looks like, the curated examples below should help.
Six code examples of the NewProvider function are shown, sorted by popularity by default.
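Before the full examples, here is a minimal, hypothetical sketch of calling zfs.NewProvider directly. The dataset name, paths, and the 1 GiB backing-file size are illustrative placeholders, not library defaults; the ProviderConfig and MakeDev fields themselves match what the examples below use.

package main

import (
	"log"
	"path/filepath"

	"github.com/flynn/flynn/host/volume/zfs"
)

func main() {
	volPath := "/var/lib/flynn/volumes" // illustrative path

	// Create a ZFS-backed provider whose zpool lives in a file-backed vdev.
	provider, err := zfs.NewProvider(&zfs.ProviderConfig{
		DatasetName: "example-dataset",
		Make: &zfs.MakeDev{
			BackingFilename: filepath.Join(volPath, "zfs/vdev/example-zpool.vdev"),
			Size: 1 << 30, // 1 GiB file-backed vdev, illustrative only
		},
		WorkingDir: filepath.Join(volPath, "zfs"),
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = provider // a volume.Provider, ready to hand to volumemanager.New
}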
Example 1: NewProvider
func NewProvider(pspec *volume.ProviderSpec) (provider volume.Provider, err error) {
	switch pspec.Kind {
	case "zfs":
		config := &zfs.ProviderConfig{}
		if err := json.Unmarshal(pspec.Config, config); err != nil {
			return nil, err
		}
		if provider, err = zfs.NewProvider(config); err != nil {
			return
		}
		return
	default:
		return nil, volume.UnknownProviderKind
	}
}
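As a brief follow-up, the hypothetical caller below shows how the wrapper in Example 1 might be fed a volume.ProviderSpec. It assumes it lives in the same package as the wrapper, that encoding/json, path/filepath, and the volume and zfs packages are imported there, and that ProviderSpec.Config holds raw JSON as the json.Unmarshal call implies; all config values are placeholders.

// Hypothetical caller of the wrapper in Example 1; config values are placeholders.
func newZFSProviderFromSpec(volPath string) (volume.Provider, error) {
	// Marshal a zfs.ProviderConfig rather than hand-writing JSON, so the field
	// names always match whatever tags the struct declares.
	rawConfig, err := json.Marshal(&zfs.ProviderConfig{
		DatasetName: "flynn-default",
		Make: &zfs.MakeDev{
			BackingFilename: filepath.Join(volPath, "zfs/vdev/flynn-default-zpool.vdev"),
			Size: 1 << 30, // 1 GiB backing file, illustrative only
		},
		WorkingDir: filepath.Join(volPath, "zfs"),
	})
	if err != nil {
		return nil, err
	}
	return NewProvider(&volume.ProviderSpec{Kind: "zfs", Config: rawConfig})
}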
Example 2: TestPersistence
// covers basic volume persistence and named volume persistence
func (s *PersistenceTests) TestPersistence(c *C) {
	idString := random.String(12)
	vmanDBfilePath := fmt.Sprintf("/tmp/flynn-volumes-%s.bolt", idString)
	zfsDatasetName := fmt.Sprintf("flynn-test-dataset-%s", idString)
	zfsVdevFilePath := fmt.Sprintf("/tmp/flynn-test-zpool-%s.vdev", idString)
	defer os.Remove(vmanDBfilePath)
	defer os.Remove(zfsVdevFilePath)
	defer func() {
		pool, _ := gzfs.GetZpool(zfsDatasetName)
		if pool != nil {
			if datasets, err := pool.Datasets(); err == nil {
				for _, dataset := range datasets {
					dataset.Destroy(gzfs.DestroyRecursive | gzfs.DestroyForceUmount)
					os.Remove(dataset.Mountpoint)
				}
			}
			err := pool.Destroy()
			c.Assert(err, IsNil)
		}
	}()
	// new volume provider with a new backing zfs vdev file
	volProv, err := zfs.NewProvider(&zfs.ProviderConfig{
		DatasetName: zfsDatasetName,
		Make: &zfs.MakeDev{
			BackingFilename: zfsVdevFilePath,
			Size: int64(math.Pow(2, float64(30))),
		},
	})
	c.Assert(err, IsNil)
	// new volume manager with that shiny new backing zfs vdev file and a new boltdb
	vman := volumemanager.New(
		vmanDBfilePath,
		func() (volume.Provider, error) { return volProv, nil },
	)
	c.Assert(vman.OpenDB(), IsNil)
	// make a volume
	vol1, err := vman.NewVolume()
	c.Assert(err, IsNil)
	// assert existence of filesystems; emplace some data
	f, err := os.Create(filepath.Join(vol1.Location(), "alpha"))
	c.Assert(err, IsNil)
	f.Close()
	// close persistence
	c.Assert(vman.CloseDB(), IsNil)
	// hack zfs export/umounting to emulate host shutdown
	err = exec.Command("zpool", "export", "-f", zfsDatasetName).Run()
	c.Assert(err, IsNil)
	// sanity check: assert the filesystems are gone
	// note that the directories remain present after 'zpool export'
	_, err = os.Stat(filepath.Join(vol1.Location(), "alpha"))
	c.Assert(os.IsNotExist(err), Equals, true)
	// restore
	vman = volumemanager.New(
		vmanDBfilePath,
		func() (volume.Provider, error) {
			c.Fatal("default provider setup should not be called if the previous provider was restored")
			return nil, nil
		},
	)
	c.Assert(vman.OpenDB(), IsNil)
	// assert volumes
	restoredVolumes := vman.Volumes()
	c.Assert(restoredVolumes, HasLen, 2)
	c.Assert(restoredVolumes[vol1.Info().ID], NotNil)
	// switch to the new volume references; do a bunch of smell checks on those
	vol1restored := restoredVolumes[vol1.Info().ID]
	c.Assert(vol1restored.Info(), DeepEquals, vol1.Info())
	c.Assert(vol1restored.Provider(), NotNil)
	// assert existences of filesystems and previous data
	c.Assert(vol1restored.Location(), testutils.DirContains, []string{"alpha"})
}
Example 3: runDaemon
func runDaemon(args *docopt.Args) {
	hostname, _ := os.Hostname()
	externalIP := args.String["--external-ip"]
	stateFile := args.String["--state"]
	hostID := args.String["--id"]
	force := args.Bool["--force"]
	volPath := args.String["--volpath"]
	backendName := args.String["--backend"]
	flynnInit := args.String["--flynn-init"]
	nsumount := args.String["--nsumount"]
	logDir := args.String["--log-dir"]
	discoveryToken := args.String["--discovery"]
	var peerIPs []string
	if args.String["--peer-ips"] != "" {
		peerIPs = strings.Split(args.String["--peer-ips"], ",")
	}
	grohl.AddContext("app", "host")
	grohl.Log(grohl.Data{"at": "start"})
	g := grohl.NewContext(grohl.Data{"fn": "main"})
	if hostID == "" {
		hostID = strings.Replace(hostname, "-", "", -1)
	}
	if strings.Contains(hostID, "-") {
		shutdown.Fatal("host id must not contain dashes")
	}
	if externalIP == "" {
		var err error
		externalIP, err = config.DefaultExternalIP()
		if err != nil {
			shutdown.Fatal(err)
		}
	}
	publishAddr := net.JoinHostPort(externalIP, "1113")
	if discoveryToken != "" {
		// TODO: retry
		discoveryID, err := discovery.RegisterInstance(discovery.Info{
			ClusterURL: discoveryToken,
			InstanceURL: "http://" + publishAddr,
			Name: hostID,
		})
		if err != nil {
			g.Log(grohl.Data{"at": "register_discovery", "status": "error", "err": err.Error()})
			shutdown.Fatal(err)
		}
		g.Log(grohl.Data{"at": "register_discovery", "id": discoveryID})
	}
	state := NewState(hostID, stateFile)
	var backend Backend
	var err error
	// create volume manager
	vman, err := volumemanager.New(
		filepath.Join(volPath, "volumes.bolt"),
		func() (volume.Provider, error) {
			// use a zpool backing file size of either 70% of the device on which
			// volumes will reside, or 100GB if that can't be determined.
			var size int64
			var dev syscall.Statfs_t
			if err := syscall.Statfs(volPath, &dev); err == nil {
				size = (dev.Bsize * int64(dev.Blocks) * 7) / 10
			} else {
				size = 100000000000
			}
			g.Log(grohl.Data{"at": "zpool_size", "size": size})
			return zfsVolume.NewProvider(&zfsVolume.ProviderConfig{
				DatasetName: "flynn-default",
				Make: &zfsVolume.MakeDev{
					BackingFilename: filepath.Join(volPath, "zfs/vdev/flynn-default-zpool.vdev"),
					Size: size,
				},
				WorkingDir: filepath.Join(volPath, "zfs"),
			})
		},
	)
	if err != nil {
		shutdown.Fatal(err)
	}
	mux := logmux.New(1000)
	shutdown.BeforeExit(func() { mux.Close() })
	switch backendName {
	case "libvirt-lxc":
		backend, err = NewLibvirtLXCBackend(state, vman, logDir, flynnInit, nsumount, mux)
	default:
		log.Fatalf("unknown backend %q", backendName)
	}
	if err != nil {
		shutdown.Fatal(err)
	}
	backend.SetDefaultEnv("EXTERNAL_IP", externalIP)
	discoverdManager := NewDiscoverdManager(backend, mux, hostID, publishAddr)
	publishURL := "http://" + publishAddr
	// ... (remainder of this function omitted) ...
Example 4: runDownload
func runDownload(args *docopt.Args) error {
	log := log15.New()
	log.Info("initializing ZFS volumes")
	volPath := args.String["--volpath"]
	volDB := filepath.Join(volPath, "volumes.bolt")
	volMan := volumemanager.New(volDB, log, func() (volume.Provider, error) {
		return zfs.NewProvider(&zfs.ProviderConfig{
			DatasetName: zfs.DefaultDatasetName,
			Make: zfs.DefaultMakeDev(volPath, log),
			WorkingDir: filepath.Join(volPath, "zfs"),
		})
	})
	if err := volMan.OpenDB(); err != nil {
		log.Error("error opening volume database, make sure flynn-host is not running", "err", err)
		return err
	}
	// create a TUF client and update it
	log.Info("initializing TUF client")
	tufDB := args.String["--tuf-db"]
	local, err := tuf.FileLocalStore(tufDB)
	if err != nil {
		log.Error("error creating local TUF client", "err", err)
		return err
	}
	remote, err := tuf.HTTPRemoteStore(args.String["--repository"], tufHTTPOpts("downloader"))
	if err != nil {
		log.Error("error creating remote TUF client", "err", err)
		return err
	}
	client := tuf.NewClient(local, remote)
	if err := updateTUFClient(client); err != nil {
		log.Error("error updating TUF client", "err", err)
		return err
	}
	configDir := args.String["--config-dir"]
	requestedVersion := os.Getenv("FLYNN_VERSION")
	if requestedVersion == "" {
		requestedVersion, err = getChannelVersion(configDir, client, log)
		if err != nil {
			return err
		}
	}
	log.Info(fmt.Sprintf("downloading components with version %s", requestedVersion))
	d := downloader.New(client, volMan, requestedVersion)
	binDir := args.String["--bin-dir"]
	log.Info(fmt.Sprintf("downloading binaries to %s", binDir))
	if _, err := d.DownloadBinaries(binDir); err != nil {
		log.Error("error downloading binaries", "err", err)
		return err
	}
	// use the requested version of flynn-host to download the images as
	// the format changed in v20161106
	if version.String() != requestedVersion {
		log.Info(fmt.Sprintf("executing %s flynn-host binary", requestedVersion))
		binPath := filepath.Join(binDir, "flynn-host")
		argv := append([]string{binPath}, os.Args[1:]...)
		return syscall.Exec(binPath, argv, os.Environ())
	}
	log.Info("downloading images")
	ch := make(chan *ct.ImagePullInfo)
	go func() {
		for info := range ch {
			switch info.Type {
			case ct.ImagePullTypeImage:
				log.Info(fmt.Sprintf("pulling %s image", info.Name))
			case ct.ImagePullTypeLayer:
				log.Info(fmt.Sprintf("pulling %s layer %s (%s)",
					info.Name, info.Layer.ID, units.BytesSize(float64(info.Layer.Length))))
			}
		}
	}()
	if err := d.DownloadImages(configDir, ch); err != nil {
		log.Error("error downloading images", "err", err)
		return err
	}
	log.Info(fmt.Sprintf("downloading config to %s", configDir))
	if _, err := d.DownloadConfig(configDir); err != nil {
		log.Error("error downloading config", "err", err)
		return err
	}
	log.Info("download complete")
	return nil
}
Example 5: runDaemon
	// ... (beginning of this function omitted) ...
log.Info("registering with cluster discovery service", "token", discoveryToken, "addr", publishAddr, "name", hostID)
discoveryID, err := discovery.RegisterInstance(discovery.Info{
ClusterURL: discoveryToken,
InstanceURL: "http://" + publishAddr,
Name: hostID,
})
if err != nil {
log.Error("error registering with cluster discovery service", "err", err)
shutdown.Fatal(err)
}
log.Info("registered with cluster discovery service", "id", discoveryID)
}
state := NewState(hostID, stateFile)
shutdown.BeforeExit(func() { state.CloseDB() })
log.Info("initializing volume manager", "provider", volProvider)
var newVolProvider func() (volume.Provider, error)
switch volProvider {
case "zfs":
newVolProvider = func() (volume.Provider, error) {
// use a zpool backing file size of either 70% of the device on which
// volumes will reside, or 100GB if that can't be determined.
log.Info("determining ZFS zpool size")
var size int64
var dev syscall.Statfs_t
if err := syscall.Statfs(volPath, &dev); err == nil {
size = (dev.Bsize * int64(dev.Blocks) * 7) / 10
} else {
size = 100000000000
}
log.Info(fmt.Sprintf("using ZFS zpool size %d", size))
return zfsVolume.NewProvider(&zfsVolume.ProviderConfig{
DatasetName: "flynn-default",
Make: &zfsVolume.MakeDev{
BackingFilename: filepath.Join(volPath, "zfs/vdev/flynn-default-zpool.vdev"),
Size: size,
},
WorkingDir: filepath.Join(volPath, "zfs"),
})
}
case "mock":
newVolProvider = func() (volume.Provider, error) { return nil, nil }
default:
shutdown.Fatalf("unknown volume provider: %q", volProvider)
}
vman := volumemanager.New(
filepath.Join(volPath, "volumes.bolt"),
newVolProvider,
)
shutdown.BeforeExit(func() { vman.CloseDB() })
mux := logmux.New(hostID, logDir, logger.New("host.id", hostID, "component", "logmux"))
log.Info("initializing job backend", "type", backendName)
var backend Backend
switch backendName {
case "libcontainer":
backend, err = NewLibcontainerBackend(state, vman, bridgeName, flynnInit, mux, partitionCGroups, logger.New("host.id", hostID, "component", "backend", "backend", "libcontainer"))
case "mock":
backend = MockBackend{}
default:
shutdown.Fatalf("unknown backend %q", backendName)
}
if err != nil {
Example 6: TestTransmittedSnapshotPersistence
func (s *PersistenceTests) TestTransmittedSnapshotPersistence(c *C) {
	idString := random.String(12)
	vmanDBfilePath := fmt.Sprintf("/tmp/flynn-volumes-%s.bolt", idString)
	zfsDatasetName := fmt.Sprintf("flynn-test-dataset-%s", idString)
	zfsVdevFilePath := fmt.Sprintf("/tmp/flynn-test-zpool-%s.vdev", idString)
	defer os.Remove(vmanDBfilePath)
	defer os.Remove(zfsVdevFilePath)
	defer func() {
		pool, _ := gzfs.GetZpool(zfsDatasetName)
		if pool != nil {
			if datasets, err := pool.Datasets(); err == nil {
				for _, dataset := range datasets {
					dataset.Destroy(gzfs.DestroyRecursive | gzfs.DestroyForceUmount)
					os.Remove(dataset.Mountpoint)
				}
			}
			err := pool.Destroy()
			c.Assert(err, IsNil)
		}
	}()
	// new volume provider with a new backing zfs vdev file
	volProv, err := zfs.NewProvider(&zfs.ProviderConfig{
		DatasetName: zfsDatasetName,
		Make: &zfs.MakeDev{
			BackingFilename: zfsVdevFilePath,
			Size: int64(math.Pow(2, float64(30))),
		},
	})
	c.Assert(err, IsNil)
	// new volume manager with that shiny new backing zfs vdev file and a new boltdb
	vman := volumemanager.New(
		vmanDBfilePath,
		log15.New(),
		func() (volume.Provider, error) { return volProv, nil },
	)
	c.Assert(vman.OpenDB(), IsNil)
	// make a volume
	vol1, err := vman.NewVolume()
	c.Assert(err, IsNil)
	// assert existence of filesystems; emplace some data
	f, err := os.Create(filepath.Join(vol1.Location(), "alpha"))
	c.Assert(err, IsNil)
	f.Close()
	// make a snapshot, make a new volume to receive it, and do the transmit
	snap, err := vman.CreateSnapshot(vol1.Info().ID)
	c.Assert(err, IsNil)
	vol2, err := vman.NewVolume()
	c.Assert(err, IsNil)
	var buf bytes.Buffer
	haves, err := vman.ListHaves(vol2.Info().ID)
	c.Assert(err, IsNil)
	err = vman.SendSnapshot(snap.Info().ID, haves, &buf)
	c.Assert(err, IsNil)
	snapTransmitted, err := vman.ReceiveSnapshot(vol2.Info().ID, &buf)
	c.Assert(err, IsNil)
	// sanity check: snapshot transmission worked
	c.Assert(vol2.Location(), testutils.DirContains, []string{"alpha"})
	c.Assert(snapTransmitted.Location(), testutils.DirContains, []string{"alpha"})
	// close persistence
	c.Assert(vman.CloseDB(), IsNil)
	// hack zfs export/umounting to emulate host shutdown
	err = exec.Command("zpool", "export", "-f", zfsDatasetName).Run()
	c.Assert(err, IsNil)
	// sanity check: assert the filesystems are gone
	// note that the directories remain present after 'zpool export'
	_, err = os.Stat(filepath.Join(snap.Location(), "alpha"))
	c.Assert(os.IsNotExist(err), Equals, true)
	_, err = os.Stat(filepath.Join(snapTransmitted.Location(), "alpha"))
	c.Assert(os.IsNotExist(err), Equals, true)
	// restore
	vman = volumemanager.New(
		vmanDBfilePath,
		log15.New(),
		func() (volume.Provider, error) {
			c.Fatal("default provider setup should not be called if the previous provider was restored")
			return nil, nil
		},
	)
	c.Assert(vman.OpenDB(), IsNil)
	// assert volumes
	restoredVolumes := vman.Volumes()
	c.Assert(restoredVolumes, HasLen, 4)
	c.Assert(restoredVolumes[vol1.Info().ID], NotNil)
	c.Assert(restoredVolumes[snap.Info().ID], NotNil)
	c.Assert(restoredVolumes[vol2.Info().ID], NotNil)
	c.Assert(restoredVolumes[snapTransmitted.Info().ID], NotNil)
	// still look like a snapshot?
	snapRestored := restoredVolumes[snapTransmitted.Info().ID]
	assertInfoEqual(c, snapRestored, snapTransmitted)
	c.Assert(snapRestored.IsSnapshot(), Equals, true)
	// ... (remainder of this function omitted) ...