This page collects typical usage examples of the Spec type from the Golang package k8s.io/kubernetes/pkg/volume. If you have been asking yourself what volume.Spec is for and how to use it, the curated examples below should help.
The following shows 15 code examples of the Spec type, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang examples.
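Before diving in, here is a minimal sketch of the shape of volume.Spec that these examples rely on. It is an illustrative reconstruction, not the full upstream definition: the real type in k8s.io/kubernetes/pkg/volume carries more fields and helpers, and the API package it references (pkg/api vs v1) varies across Kubernetes releases.

package volume

import "k8s.io/kubernetes/pkg/api"

// Spec is an internal representation of a volume: exactly one of Volume
// (a volume declared inline in a pod) or PersistentVolume is set.
type Spec struct {
	Volume           *api.Volume           // inline pod volume, if any
	PersistentVolume *api.PersistentVolume // bound PV, if any
	ReadOnly         bool                  // ReadOnly requested by the claim, for PV-backed specs
}

// Name returns the name of the inline volume or of the PersistentVolume.
func (spec *Spec) Name() string {
	switch {
	case spec.Volume != nil:
		return spec.Volume.Name
	case spec.PersistentVolume != nil:
		return spec.PersistentVolume.Name
	}
	return ""
}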
Example 1: newBuilderInternal
func (p *pwxPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager pwxManager, mounter mount.Interface) (volume.Builder, error) {
	var readOnly bool
	var px *api.PwxVolumeSource
	if spec.Volume != nil && spec.Volume.PwxDisk != nil {
		px = spec.Volume.PwxDisk
		readOnly = px.ReadOnly
	} else {
		px = spec.PersistentVolume.Spec.PwxDisk
		readOnly = spec.ReadOnly
	}

	volid := px.VolumeID
	fsType := px.FSType

	return &pwxBuilder{
		pwxDisk: &pwxDisk{
			podUID:   podUID,
			volName:  spec.Name(),
			VolumeID: volid,
			mounter:  mounter,
			manager:  manager,
			plugin:   p,
		},
		fsType:      fsType,
		readOnly:    readOnly,
		diskMounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}}, nil
}
Example 2: newBuilderInternal
func (plugin *fcPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Builder, error) {
	// fc volumes used directly in a pod have a ReadOnly flag set by the pod author.
	// fc volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV.
	var readOnly bool
	var fc *api.FCVolumeSource
	if spec.Volume != nil && spec.Volume.FC != nil {
		fc = spec.Volume.FC
		readOnly = fc.ReadOnly
	} else {
		fc = spec.PersistentVolume.Spec.FC
		readOnly = spec.ReadOnly
	}

	if fc.Lun == nil {
		return nil, fmt.Errorf("empty lun")
	}
	lun := strconv.Itoa(int(*fc.Lun))

	return &fcDiskBuilder{
		fcDisk: &fcDisk{
			podUID:  podUID,
			volName: spec.Name(),
			wwns:    fc.TargetWWNs,
			lun:     lun,
			manager: manager,
			io:      &osIOHandler{},
			plugin:  plugin},
		fsType:   fc.FSType,
		readOnly: readOnly,
		mounter:  &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
	}, nil
}
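Examples 1 and 2 (and examples 7, 9 and 12 below) spell out the same source-resolution pattern by hand: take the volume source from spec.Volume when the volume is declared inline in the pod, otherwise from spec.PersistentVolume, and pick the matching ReadOnly flag. Other examples (4, 5, 10, 13) delegate this to a per-plugin getVolumeSource helper that is not shown on this page. A plausible sketch of such a helper for the FC plugin, written against the assumed Spec shape above, would be:

// getVolumeSource is a hypothetical reconstruction of the helper called in
// example 5: it resolves the FC source from either an inline pod volume
// (ReadOnly set by the pod author) or a PersistentVolume (ReadOnly taken
// from the claim via spec.ReadOnly).
func getVolumeSource(spec *volume.Spec) (*api.FCVolumeSource, bool, error) {
	if spec.Volume != nil && spec.Volume.FC != nil {
		return spec.Volume.FC, spec.Volume.FC.ReadOnly, nil
	}
	if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FC != nil {
		return spec.PersistentVolume.Spec.FC, spec.ReadOnly, nil
	}
	return nil, false, fmt.Errorf("spec does not reference an FC volume type")
}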
Example 3: AddVolumeNode
func (asw *actualStateOfWorld) AddVolumeNode(
	volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) (v1.UniqueVolumeName, error) {
	asw.Lock()
	defer asw.Unlock()

	attachableVolumePlugin, err := asw.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
	if err != nil || attachableVolumePlugin == nil {
		return "", fmt.Errorf(
			"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
			volumeSpec.Name(),
			err)
	}

	volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
		attachableVolumePlugin, volumeSpec)
	if err != nil {
		return "", fmt.Errorf(
			"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q err=%v",
			volumeSpec.Name(),
			err)
	}

	volumeObj, volumeExists := asw.attachedVolumes[volumeName]
	if !volumeExists {
		volumeObj = attachedVolume{
			volumeName:      volumeName,
			spec:            volumeSpec,
			nodesAttachedTo: make(map[types.NodeName]nodeAttachedTo),
			devicePath:      devicePath,
		}
	} else {
		// The volume object already exists, so its information may be stale.
		// Refresh every field except the set of nodes the volume is attached to.
		volumeObj.devicePath = devicePath
		volumeObj.spec = volumeSpec
		glog.V(2).Infof("Volume %q is already added to attachedVolume list for node %q, updating device path %q",
			volumeName,
			nodeName,
			devicePath)
	}
	asw.attachedVolumes[volumeName] = volumeObj

	_, nodeExists := volumeObj.nodesAttachedTo[nodeName]
	if !nodeExists {
		// Create the per-node entry if it doesn't exist yet.
		volumeObj.nodesAttachedTo[nodeName] = nodeAttachedTo{
			nodeName:              nodeName,
			mountedByNode:         true, // Assume mounted, until proven otherwise
			mountedByNodeSetCount: 0,
			detachRequestedTime:   time.Time{},
		}
	} else {
		glog.V(5).Infof("Volume %q is already added to attachedVolume list for node %q",
			volumeName,
			nodeName)
	}

	asw.addVolumeToReportAsAttached(volumeName, nodeName)
	return volumeName, nil
}
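Worth noting: AddVolumeNode is idempotent per volume/node pair, and a repeated call only refreshes devicePath and spec while preserving the per-node attach bookkeeping. A hedged usage sketch, where asw and volumeSpec are assumed to come from the surrounding attach/detach controller setup:

// First call registers the volume as attached to node-1.
volumeName, err := asw.AddVolumeNode(volumeSpec, types.NodeName("node-1"), "/dev/sdb")
if err != nil {
	glog.Errorf("AddVolumeNode failed: %v", err)
	return
}
// A second call for the same pair only refreshes the device path; the
// mountedByNode / detachRequestedTime bookkeeping is preserved.
if _, err := asw.AddVolumeNode(volumeSpec, types.NodeName("node-1"), "/dev/sdc"); err != nil {
	glog.Errorf("AddVolumeNode failed: %v", err)
	return
}
glog.V(4).Infof("volume %q attached to node-1", volumeName)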
Example 4: newMounterInternal
func (plugin *gcePersistentDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Mounter, error) {
	// GCEPDs used directly in a pod have a ReadOnly flag set by the pod author.
	// GCEPDs used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV.
	volumeSource, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	pdName := volumeSource.PDName
	partition := ""
	if volumeSource.Partition != 0 {
		partition = strconv.Itoa(int(volumeSource.Partition))
	}

	return &gcePersistentDiskMounter{
		gcePersistentDisk: &gcePersistentDisk{
			podUID:          podUID,
			volName:         spec.Name(),
			pdName:          pdName,
			partition:       partition,
			mounter:         mounter,
			manager:         manager,
			plugin:          plugin,
			MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
		},
		readOnly: readOnly}, nil
}
Example 5: newMounterInternal
func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Mounter, error) {
	// fc volumes used directly in a pod have a ReadOnly flag set by the pod author.
	// fc volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV.
	fc, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	if fc.Lun == nil {
		return nil, fmt.Errorf("empty lun")
	}
	lun := strconv.Itoa(int(*fc.Lun))

	return &fcDiskMounter{
		fcDisk: &fcDisk{
			podUID:  podUID,
			volName: spec.Name(),
			wwns:    fc.TargetWWNs,
			lun:     lun,
			manager: manager,
			io:      &osIOHandler{},
			plugin:  plugin},
		fsType:   fc.FSType,
		readOnly: readOnly,
		mounter:  &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
	}, nil
}
Example 6: NewMounter
func (plugin *configMapPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
	return &configMapVolumeMounter{
		configMapVolume: &configMapVolume{spec.Name(), pod.UID, plugin, plugin.host.GetMounter(), plugin.host.GetWriter(), volume.MetricsNil{}},
		source:          *spec.Volume.ConfigMap,
		pod:             *pod,
		opts:            &opts}, nil
}
Example 7: newBuilderInternal
func (plugin *iscsiPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Builder, error) {
	// iscsi volumes used directly in a pod have a ReadOnly flag set by the pod author.
	// iscsi volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV.
	var readOnly bool
	var iscsi *api.ISCSIVolumeSource
	if spec.Volume != nil && spec.Volume.ISCSI != nil {
		iscsi = spec.Volume.ISCSI
		readOnly = iscsi.ReadOnly
	} else {
		iscsi = spec.PersistentVolume.Spec.ISCSI
		readOnly = spec.ReadOnly
	}

	lun := strconv.Itoa(iscsi.Lun)
	portal := portalBuilder(iscsi.TargetPortal)
	iface := iscsi.ISCSIInterface

	return &iscsiDiskBuilder{
		iscsiDisk: &iscsiDisk{
			podUID:  podUID,
			volName: spec.Name(),
			portal:  portal,
			iqn:     iscsi.IQN,
			lun:     lun,
			iface:   iface,
			manager: manager,
			plugin:  plugin},
		fsType:   iscsi.FSType,
		readOnly: readOnly,
		mounter:  &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
	}, nil
}
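portalBuilder is not shown here. A plausible sketch, assuming it merely normalizes the target portal by appending iSCSI's well-known port 3260 when no port is given:

// portalBuilder is a hypothetical reconstruction: it ensures the target
// portal carries an explicit port, defaulting to iSCSI's standard 3260.
func portalBuilder(portal string) string {
	if !strings.Contains(portal, ":") {
		return portal + ":3260"
	}
	return portal
}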
Example 8: newMounterInternal
func (plugin *cephfsPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface, secret string) (volume.Mounter, error) {
	cephvs := plugin.getVolumeSource(spec)
	id := cephvs.User
	if id == "" {
		id = "admin"
	}
	path := cephvs.Path
	if path == "" {
		path = "/"
	}
	if !strings.HasPrefix(path, "/") {
		path = "/" + path
	}
	secretFile := cephvs.SecretFile
	if secretFile == "" {
		secretFile = "/etc/ceph/" + id + ".secret"
	}

	return &cephfsMounter{
		cephfs: &cephfs{
			podUID:      podUID,
			volName:     spec.Name(),
			mon:         cephvs.Monitors,
			path:        path,
			secret:      secret,
			id:          id,
			secret_file: secretFile,
			readonly:    cephvs.ReadOnly,
			mounter:     mounter,
			plugin:      plugin},
	}, nil
}
Example 9: newBuilderInternal
func (plugin *gcePersistentDiskPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Builder, error) {
	// GCEPDs used directly in a pod have a ReadOnly flag set by the pod author.
	// GCEPDs used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV.
	var readOnly bool
	var gce *api.GCEPersistentDiskVolumeSource
	if spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil {
		gce = spec.Volume.GCEPersistentDisk
		readOnly = gce.ReadOnly
	} else {
		gce = spec.PersistentVolume.Spec.GCEPersistentDisk
		readOnly = spec.ReadOnly
	}

	pdName := gce.PDName
	fsType := gce.FSType
	partition := ""
	if gce.Partition != 0 {
		partition = strconv.Itoa(gce.Partition)
	}

	return &gcePersistentDiskBuilder{
		gcePersistentDisk: &gcePersistentDisk{
			podUID:    podUID,
			volName:   spec.Name(),
			pdName:    pdName,
			partition: partition,
			mounter:   mounter,
			manager:   manager,
			plugin:    plugin,
		},
		fsType:      fsType,
		readOnly:    readOnly,
		diskMounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}}, nil
}
Example 10: newMounterInternal
func (plugin *azureDataDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface) (volume.Mounter, error) {
	// Azure data disks used directly in a pod have a ReadOnly flag set by the pod author.
	// Azure data disks used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV.
	azure, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	fsType := "ext4"
	if azure.FSType != nil {
		fsType = *azure.FSType
	}
	cachingMode := api.AzureDataDiskCachingNone
	if azure.CachingMode != nil {
		cachingMode = *azure.CachingMode
	}
	readOnly := false
	if azure.ReadOnly != nil {
		readOnly = *azure.ReadOnly
	}
	diskName := azure.DiskName
	diskUri := azure.DataDiskURI

	return &azureDiskMounter{
		azureDisk: &azureDisk{
			podUID:      podUID,
			volName:     spec.Name(),
			diskName:    diskName,
			diskUri:     diskUri,
			cachingMode: cachingMode,
			mounter:     mounter,
			plugin:      plugin,
		},
		fsType:      fsType,
		readOnly:    readOnly,
		diskMounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(), Runner: exec.New()}}, nil
}
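The Azure disk source keeps its optional fields (FSType, CachingMode, ReadOnly) as pointers so that "unset" is distinguishable from the zero value. The dereference-with-default pattern above could be factored into small helpers; these are illustrative only, not part of the plugin:

// stringOrDefault and boolOrDefault are hypothetical helpers: they return
// the pointee when set, otherwise the given default.
func stringOrDefault(v *string, def string) string {
	if v != nil {
		return *v
	}
	return def
}

func boolOrDefault(v *bool, def bool) bool {
	if v != nil {
		return *v
	}
	return def
}

With them, the defaults in this example collapse to fsType := stringOrDefault(azure.FSType, "ext4") and readOnly := boolOrDefault(azure.ReadOnly, false).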
Example 11: newBuilderInternal
func (plugin *rbdPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, secret string) (volume.Builder, error) {
	source, readOnly := plugin.getRBDVolumeSource(spec)
	pool := source.RBDPool
	if pool == "" {
		pool = "rbd"
	}
	id := source.RadosUser
	if id == "" {
		id = "admin"
	}
	keyring := source.Keyring
	if keyring == "" {
		keyring = "/etc/ceph/keyring"
	}

	return &rbdBuilder{
		rbd: &rbd{
			podUID:   podUID,
			volName:  spec.Name(),
			Image:    source.RBDImage,
			Pool:     pool,
			ReadOnly: readOnly,
			manager:  manager,
			mounter:  &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
			plugin:   plugin,
		},
		Mon:     source.CephMonitors,
		Id:      id,
		Keyring: keyring,
		Secret:  secret,
		fsType:  source.FSType,
	}, nil
}
Example 12: newBuilderInternal
func (plugin *awsElasticBlockStorePlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Builder, error) {
	// EBS volumes used directly in a pod have a ReadOnly flag set by the pod author.
	// EBS volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV.
	var readOnly bool
	var ebs *api.AWSElasticBlockStoreVolumeSource
	if spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil {
		ebs = spec.Volume.AWSElasticBlockStore
		readOnly = ebs.ReadOnly
	} else {
		ebs = spec.PersistentVolume.Spec.AWSElasticBlockStore
		readOnly = spec.ReadOnly
	}

	volumeID := ebs.VolumeID
	fsType := ebs.FSType
	partition := ""
	if ebs.Partition != 0 {
		partition = strconv.Itoa(ebs.Partition)
	}

	return &awsElasticBlockStoreBuilder{
		awsElasticBlockStore: &awsElasticBlockStore{
			podUID:   podUID,
			volName:  spec.Name(),
			volumeID: volumeID,
			manager:  manager,
			mounter:  mounter,
			plugin:   plugin,
		},
		fsType:      fsType,
		partition:   partition,
		readOnly:    readOnly,
		diskMounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(), Runner: exec.New()}}, nil
}
Example 13: newMounterInternal
// newMounterInternal is the internal mounter routine to build the volume.
func (plugin *flexVolumePlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, manager flexVolumeManager, mounter mount.Interface, runner exec.Interface, secrets map[string]string) (volume.Mounter, error) {
	source, _, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	return &flexVolumeMounter{
		flexVolumeDisk: &flexVolumeDisk{
			podUID:       pod.UID,
			podNamespace: pod.Namespace,
			podName:      pod.Name,
			volName:      spec.Name(),
			driverName:   source.Driver,
			execPath:     plugin.getExecutable(),
			mounter:      mounter,
			plugin:       plugin,
			secrets:      secrets,
		},
		fsType:             source.FSType,
		readOnly:           source.ReadOnly,
		options:            source.Options,
		runner:             runner,
		manager:            manager,
		blockDeviceMounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: runner},
	}, nil
}
Example 14: NewBuilder
func (plugin *secretPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) {
	return &secretVolumeBuilder{
		secretVolume: &secretVolume{spec.Name(), pod.UID, plugin, mounter},
		secretName:   spec.Volume.Secret.SecretName,
		pod:          *pod,
		opts:         &opts}, nil
}
Example 15: newDeleter
func newDeleter(spec *volume.Spec, host volume.VolumeHost) (volume.Deleter, error) {
	// Reject specs that do not reference a HostPath-backed PersistentVolume;
	// dereferencing HostPath below would otherwise panic on a nil PV.
	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil {
		return nil, fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
	}
	path := spec.PersistentVolume.Spec.HostPath.Path
	return &hostPathDeleter{name: spec.Name(), path: path, host: host}, nil
}
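To close, a hedged usage sketch for example 15: wrap a HostPath-backed PersistentVolume in a volume.Spec and hand it to newDeleter. The PV literal and error handling are illustrative only, and host is assumed to be the volume.VolumeHost the plugin was initialized with.

// Build a Spec around a HostPath PersistentVolume and delete its backing path.
pv := &api.PersistentVolume{
	ObjectMeta: api.ObjectMeta{Name: "pv-hostpath"},
	Spec: api.PersistentVolumeSpec{
		PersistentVolumeSource: api.PersistentVolumeSource{
			HostPath: &api.HostPathVolumeSource{Path: "/tmp/data"},
		},
	},
}
spec := &volume.Spec{PersistentVolume: pv}
deleter, err := newDeleter(spec, host)
if err != nil {
	return err
}
return deleter.Delete()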