This article collects typical code examples of the GetZpool function from github.com/mistifyio/go-zfs in Golang. If you are unsure what GetZpool does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
Eight code examples of the GetZpool function are shown below, sorted by popularity by default.
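Before the examples, here is a minimal, self-contained sketch of calling GetZpool on its own. The pool name "tank" and the error handling are illustrative assumptions and are not taken from any of the examples below; only the Name and Health fields, which also appear in Example 1, are read.

package main

import (
	"fmt"
	"log"

	zfs "github.com/mistifyio/go-zfs"
)

func main() {
	// "tank" is a placeholder; substitute the name of a zpool that exists on this host.
	pool, err := zfs.GetZpool("tank")
	if err != nil {
		log.Fatalf("failed to look up zpool: %v", err)
	}
	fmt.Printf("pool %q is %s\n", pool.Name, pool.Health)
}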
Example 1: Status
// Status returns information about the ZFS filesystem. It returns a two-dimensional array of information
// such as pool name, dataset name, disk usage, parent quota and compression used.
// Currently it returns 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent',
// 'Space Available', 'Parent Quota' and 'Compression'.
func (d *Driver) Status() [][2]string {
	parts := strings.Split(d.dataset.Name, "/")
	pool, err := zfs.GetZpool(parts[0])

	var poolName, poolHealth string
	if err == nil {
		poolName = pool.Name
		poolHealth = pool.Health
	} else {
		poolName = fmt.Sprintf("error while getting pool information %v", err)
		poolHealth = "not available"
	}

	quota := "no"
	if d.dataset.Quota != 0 {
		quota = strconv.FormatUint(d.dataset.Quota, 10)
	}

	return [][2]string{
		{"Zpool", poolName},
		{"Zpool Health", poolHealth},
		{"Parent Dataset", d.dataset.Name},
		{"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)},
		{"Space Available", strconv.FormatUint(d.dataset.Avail, 10)},
		{"Parent Quota", quota},
		{"Compression", d.dataset.Compression},
	}
}
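The [][2]string returned by Status is just a list of label/value pairs, so a caller can render it however it likes. The sketch below is a hypothetical consumer, assuming d is an already-initialized *Driver (construction is not shown in the example above):

// d is assumed to be an already-initialized *Driver; its construction is omitted here.
for _, row := range d.Status() {
	fmt.Printf("%-22s %s\n", row[0], row[1])
}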
Example 2: TestProviderAutomaticFileVdevZpoolCreation
func (ZpoolTests) TestProviderAutomaticFileVdevZpoolCreation(c *C) {
	dataset := "testpool-dinosaur"

	// don't actually use ioutil.TempFile;
	// we want to exercise the path where the file doesn't exist.
	backingFilePath := fmt.Sprintf("/tmp/zfs-%s", random.String(12))
	defer os.Remove(backingFilePath)

	provider, err := NewProvider(&ProviderConfig{
		DatasetName: dataset,
		Make: &MakeDev{
			BackingFilename: backingFilePath,
			Size:            one_gig,
		},
	})
	defer func() {
		pool, _ := gzfs.GetZpool(dataset)
		if pool != nil {
			pool.Destroy()
		}
	}()
	c.Assert(err, IsNil)
	c.Assert(provider, NotNil)

	// also, we shouldn't get any '/testpool' dir at root
	_, err = os.Stat(dataset)
	c.Assert(err, NotNil)
	c.Assert(os.IsNotExist(err), Equals, true)
}
Example 3: TestOrphanedZpoolFileAdoption
func (ZpoolTests) TestOrphanedZpoolFileAdoption(c *C) {
	dataset := "testpool-bananagram"

	backingFilePath := fmt.Sprintf("/tmp/zfs-%s", random.String(12))
	defer os.Remove(backingFilePath)

	provider, err := NewProvider(&ProviderConfig{
		DatasetName: dataset,
		Make: &MakeDev{
			BackingFilename: backingFilePath,
			Size:            one_gig,
		},
	})
	defer func() {
		pool, _ := gzfs.GetZpool(dataset)
		if pool != nil {
			pool.Destroy()
		}
	}()
	c.Assert(err, IsNil)
	c.Assert(provider, NotNil)

	// add a dataset to this zpool, so we can check for it on the flip side
	markerDatasetName := path.Join(dataset, "testfs")
	_, err = gzfs.CreateFilesystem(markerDatasetName, nil)
	c.Assert(err, IsNil)

	// do a 'zpool export'
	// this roughly approximates what we see after a host reboot
	// (though host reboots may leave it in an even more unclean state than this)
	err = exec.Command("zpool", "export", "-f", dataset).Run()
	c.Assert(err, IsNil)

	// sanity check that our test is doing the right thing: zpool forgot about these
	_, err = gzfs.GetDataset(dataset)
	c.Assert(err, NotNil)
	_, err = gzfs.GetDataset(markerDatasetName)
	c.Assert(err, NotNil)

	// if we create another provider with the same file vdev path, it should
	// pick up that file again without wrecking the dataset
	provider, err = NewProvider(&ProviderConfig{
		DatasetName: dataset,
		Make: &MakeDev{
			BackingFilename: backingFilePath,
			Size:            one_gig,
		},
	})
	c.Assert(err, IsNil)
	c.Assert(provider, NotNil)
	_, err = gzfs.GetDataset(markerDatasetName)
	c.Assert(err, IsNil)
}
Example 4: TearDownTest
func (s *TempZpool) TearDownTest(c *C) {
	if s.ZpoolVdevFilePath != "" {
		os.Remove(s.ZpoolVdevFilePath)
	}
	pool, _ := gzfs.GetZpool(s.ZpoolName)
	if pool != nil {
		if datasets, err := pool.Datasets(); err == nil {
			for _, dataset := range datasets {
				dataset.Destroy(gzfs.DestroyRecursive | gzfs.DestroyForceUmount)
				os.Remove(dataset.Mountpoint)
			}
		}
		err := pool.Destroy()
		c.Assert(err, IsNil)
	}
}
Example 5: TestProviderExistingZpoolDetection
func (ZpoolTests) TestProviderExistingZpoolDetection(c *C) {
	dataset := "testpool-festival"

	backingFilePath := fmt.Sprintf("/tmp/zfs-%s", random.String(12))
	defer os.Remove(backingFilePath)

	provider, err := NewProvider(&ProviderConfig{
		DatasetName: dataset,
		Make: &MakeDev{
			BackingFilename: backingFilePath,
			Size:            one_gig,
		},
	})
	defer func() {
		pool, _ := gzfs.GetZpool(dataset)
		if pool != nil {
			pool.Destroy()
		}
	}()
	c.Assert(err, IsNil)
	c.Assert(provider, NotNil)

	// if we create another provider with the same dataset, it should
	// see the existing one and thus shouldn't hit the MakeDev path
	badFilePath := "/tmp/zfs-test-should-not-exist"
	provider, err = NewProvider(&ProviderConfig{
		DatasetName: dataset,
		Make: &MakeDev{
			BackingFilename: badFilePath,
			Size:            one_gig,
		},
	})
	c.Assert(err, IsNil)
	c.Assert(provider, NotNil)
	_, err = os.Stat(badFilePath)
	c.Assert(err, NotNil)
	c.Assert(os.IsNotExist(err), Equals, true)
}
Example 6: TestNonZpoolFilesFailImport
func (ZpoolTests) TestNonZpoolFilesFailImport(c *C) {
	dataset := "testpool-landslide"

	backingFile, err := ioutil.TempFile("/tmp/", "zfs-")
	c.Assert(err, IsNil)
	backingFile.Write([]byte{'a', 'b', 'c'})
	backingFile.Close()

	provider, err := NewProvider(&ProviderConfig{
		DatasetName: dataset,
		Make: &MakeDev{
			BackingFilename: backingFile.Name(),
			Size:            one_gig,
		},
	})
	defer func() {
		pool, _ := gzfs.GetZpool(dataset)
		if pool != nil {
			pool.Destroy()
		}
	}()
	c.Assert(err, NotNil)
	c.Assert(provider, IsNil)
}
Example 7: TestPersistence
// covers basic volume persistence and named volume persistence
func (s *PersistenceTests) TestPersistence(c *C) {
	idString := random.String(12)
	vmanDBfilePath := fmt.Sprintf("/tmp/flynn-volumes-%s.bolt", idString)
	zfsDatasetName := fmt.Sprintf("flynn-test-dataset-%s", idString)
	zfsVdevFilePath := fmt.Sprintf("/tmp/flynn-test-zpool-%s.vdev", idString)
	defer os.Remove(vmanDBfilePath)
	defer os.Remove(zfsVdevFilePath)
	defer func() {
		pool, _ := gzfs.GetZpool(zfsDatasetName)
		if pool != nil {
			if datasets, err := pool.Datasets(); err == nil {
				for _, dataset := range datasets {
					dataset.Destroy(gzfs.DestroyRecursive | gzfs.DestroyForceUmount)
					os.Remove(dataset.Mountpoint)
				}
			}
			err := pool.Destroy()
			c.Assert(err, IsNil)
		}
	}()

	// new volume provider with a new backing zfs vdev file
	volProv, err := zfs.NewProvider(&zfs.ProviderConfig{
		DatasetName: zfsDatasetName,
		Make: &zfs.MakeDev{
			BackingFilename: zfsVdevFilePath,
			Size:            int64(math.Pow(2, float64(30))),
		},
	})
	c.Assert(err, IsNil)

	// new volume manager with that shiny new backing zfs vdev file and a new boltdb
	vman := volumemanager.New(
		vmanDBfilePath,
		log15.New(),
		func() (volume.Provider, error) { return volProv, nil },
	)
	c.Assert(vman.OpenDB(), IsNil)

	// make two volumes
	vol1, err := vman.NewVolume()
	c.Assert(err, IsNil)
	vol2, err := vman.NewVolume()
	c.Assert(err, IsNil)

	// assert existence of filesystems; emplace some data
	f, err := os.Create(filepath.Join(vol1.Location(), "alpha"))
	c.Assert(err, IsNil)
	f.Close()

	// close persistence
	c.Assert(vman.CloseDB(), IsNil)

	// delete the second volume so we can check it doesn't prevent
	// a later restore
	c.Assert(volProv.DestroyVolume(vol2), IsNil)

	// hack zfs export/umounting to emulate host shutdown
	err = exec.Command("zpool", "export", "-f", zfsDatasetName).Run()
	c.Assert(err, IsNil)

	// sanity check: assert the filesystems are gone
	// note that the directories remain present after 'zpool export'
	_, err = os.Stat(filepath.Join(vol1.Location(), "alpha"))
	c.Assert(os.IsNotExist(err), Equals, true)

	// restore
	vman = volumemanager.New(
		vmanDBfilePath,
		log15.New(),
		func() (volume.Provider, error) {
			c.Fatal("default provider setup should not be called if the previous provider was restored")
			return nil, nil
		},
	)
	c.Assert(vman.OpenDB(), IsNil)

	// assert volumes
	restoredVolumes := vman.Volumes()
	c.Assert(restoredVolumes, HasLen, 1)
	c.Assert(restoredVolumes[vol1.Info().ID], NotNil)
	c.Assert(restoredVolumes[vol2.Info().ID], IsNil)

	// switch to the new volume references; do a bunch of smell checks on those
	vol1restored := restoredVolumes[vol1.Info().ID]
	assertInfoEqual(c, vol1restored, vol1)
	c.Assert(vol1restored.Provider(), NotNil)

	// assert existences of filesystems and previous data
	c.Assert(vol1restored.Location(), testutils.DirContains, []string{"alpha"})
}
Example 8: TestTransmittedSnapshotPersistence
func (s *PersistenceTests) TestTransmittedSnapshotPersistence(c *C) {
	idString := random.String(12)
	vmanDBfilePath := fmt.Sprintf("/tmp/flynn-volumes-%s.bolt", idString)
	zfsDatasetName := fmt.Sprintf("flynn-test-dataset-%s", idString)
	zfsVdevFilePath := fmt.Sprintf("/tmp/flynn-test-zpool-%s.vdev", idString)
	defer os.Remove(vmanDBfilePath)
	defer os.Remove(zfsVdevFilePath)
	defer func() {
		pool, _ := gzfs.GetZpool(zfsDatasetName)
		if pool != nil {
			if datasets, err := pool.Datasets(); err == nil {
				for _, dataset := range datasets {
					dataset.Destroy(gzfs.DestroyRecursive | gzfs.DestroyForceUmount)
					os.Remove(dataset.Mountpoint)
				}
			}
			err := pool.Destroy()
			c.Assert(err, IsNil)
		}
	}()

	// new volume provider with a new backing zfs vdev file
	volProv, err := zfs.NewProvider(&zfs.ProviderConfig{
		DatasetName: zfsDatasetName,
		Make: &zfs.MakeDev{
			BackingFilename: zfsVdevFilePath,
			Size:            int64(math.Pow(2, float64(30))),
		},
	})
	c.Assert(err, IsNil)

	// new volume manager with that shiny new backing zfs vdev file and a new boltdb
	vman := volumemanager.New(
		vmanDBfilePath,
		log15.New(),
		func() (volume.Provider, error) { return volProv, nil },
	)
	c.Assert(vman.OpenDB(), IsNil)

	// make a volume
	vol1, err := vman.NewVolume()
	c.Assert(err, IsNil)

	// assert existence of filesystems; emplace some data
	f, err := os.Create(filepath.Join(vol1.Location(), "alpha"))
	c.Assert(err, IsNil)
	f.Close()

	// make a snapshot, make a new volume to receive it, and do the transmit
	snap, err := vman.CreateSnapshot(vol1.Info().ID)
	vol2, err := vman.NewVolume()
	c.Assert(err, IsNil)
	var buf bytes.Buffer
	haves, err := vman.ListHaves(vol2.Info().ID)
	c.Assert(err, IsNil)
	err = vman.SendSnapshot(snap.Info().ID, haves, &buf)
	c.Assert(err, IsNil)
	snapTransmitted, err := vman.ReceiveSnapshot(vol2.Info().ID, &buf)

	// sanity check: snapshot transmission worked
	c.Assert(vol2.Location(), testutils.DirContains, []string{"alpha"})
	c.Assert(snapTransmitted.Location(), testutils.DirContains, []string{"alpha"})

	// close persistence
	c.Assert(vman.CloseDB(), IsNil)

	// hack zfs export/umounting to emulate host shutdown
	err = exec.Command("zpool", "export", "-f", zfsDatasetName).Run()
	c.Assert(err, IsNil)

	// sanity check: assert the filesystems are gone
	// note that the directories remain present after 'zpool export'
	_, err = os.Stat(filepath.Join(snap.Location(), "alpha"))
	c.Assert(os.IsNotExist(err), Equals, true)
	_, err = os.Stat(filepath.Join(snapTransmitted.Location(), "alpha"))
	c.Assert(os.IsNotExist(err), Equals, true)

	// restore
	vman = volumemanager.New(
		vmanDBfilePath,
		log15.New(),
		func() (volume.Provider, error) {
			c.Fatal("default provider setup should not be called if the previous provider was restored")
			return nil, nil
		},
	)
	c.Assert(vman.OpenDB(), IsNil)

	// assert volumes
	restoredVolumes := vman.Volumes()
	c.Assert(restoredVolumes, HasLen, 4)
	c.Assert(restoredVolumes[vol1.Info().ID], NotNil)
	c.Assert(restoredVolumes[snap.Info().ID], NotNil)
	c.Assert(restoredVolumes[vol2.Info().ID], NotNil)
	c.Assert(restoredVolumes[snapTransmitted.Info().ID], NotNil)

	// still look like a snapshot?
	snapRestored := restoredVolumes[snapTransmitted.Info().ID]
	assertInfoEqual(c, snapRestored, snapTransmitted)
	c.Assert(snapRestored.IsSnapshot(), Equals, true)
//......... the rest of this code has been omitted .........