Golang util.FileExists Function Code Examples

This article collects typical usage examples of the FileExists function from the Go package k8s.io/kubernetes/pkg/util. If you are wondering what Golang's FileExists does, how to call it, or what real-world usage looks like, the curated code samples below should help.


The following shows 9 code examples of the FileExists function, sorted by popularity by default.
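Before the project examples, here is a minimal sketch of the signature and the common check-before-acting pattern. As every example below demonstrates, FileExists takes a path and returns (bool, error). The ensureMarker helper, the marker path, and its contents are illustrative assumptions rather than code from any of the projects listed here.

import (
	"fmt"
	"io/ioutil"

	"k8s.io/kubernetes/pkg/util"
)

// ensureMarker is a hypothetical helper: it creates markerPath only if the file
// does not already exist. util.FileExists returns (exists bool, err error); a
// non-nil error means the existence check itself failed, not that the file is missing.
func ensureMarker(markerPath string) error {
	exists, err := util.FileExists(markerPath)
	if err != nil {
		return fmt.Errorf("error checking %s: %v", markerPath, err)
	}
	if exists {
		// Already present, nothing to do.
		return nil
	}
	return ioutil.WriteFile(markerPath, []byte("ready"), 0600)
}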

Example 1: TestDeleter

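// TestDeleter provisions a temporary host-path volume, deletes it through the
// host_path plugin, and relies on util.FileExists to confirm the path is gone afterwards.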
func TestDeleter(t *testing.T) {
	tempPath := fmt.Sprintf("/tmp/hostpath/%s", util.NewUUID())
	defer os.RemoveAll(tempPath)
	err := os.MkdirAll(tempPath, 0750)
	if err != nil {
		t.Fatalf("Failed to create tmp directory for deleter: %v", err)
	}

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))

	spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: tempPath}}}}}
	plug, err := plugMgr.FindDeletablePluginBySpec(spec)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	deleter, err := plug.NewDeleter(spec)
	if err != nil {
		t.Errorf("Failed to make a new Deleter: %v", err)
	}
	if deleter.GetPath() != tempPath {
		t.Errorf("Expected %s but got %s", tempPath, deleter.GetPath())
	}
	if err := deleter.Delete(); err != nil {
		t.Errorf("Mock Recycler expected to return nil but got %s", err)
	}
	if exists, _ := util.FileExists(tempPath); exists {
		t.Errorf("Temp path expected to be deleted, but was found at %s", tempPath)
	}
}
Developer ID: johndmulhausen, Project: kubernetes, Lines: 30, Source file: host_path_test.go

Example 2: generateSSHKey

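// generateSSHKey writes a fresh SSH key pair; util.FileExists detects a stale private
// key left over from an earlier, partially completed run so it can be removed first.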
func (m *Master) generateSSHKey(user, privateKeyfile, publicKeyfile string) error {
	private, public, err := util.GenerateKey(2048)
	if err != nil {
		return err
	}
	// If private keyfile already exists, we must have only made it halfway
	// through last time, so delete it.
	exists, err := util.FileExists(privateKeyfile)
	if err != nil {
		glog.Errorf("Error detecting if private key exists: %v", err)
	} else if exists {
		glog.Infof("Private key exists, but public key does not")
		if err := os.Remove(privateKeyfile); err != nil {
			glog.Errorf("Failed to remove stale private key: %v", err)
		}
	}
	if err := ioutil.WriteFile(privateKeyfile, util.EncodePrivateKey(private), 0600); err != nil {
		return err
	}
	publicKeyBytes, err := util.EncodePublicKey(public)
	if err != nil {
		return err
	}
	if err := ioutil.WriteFile(publicKeyfile+".tmp", publicKeyBytes, 0600); err != nil {
		return err
	}
	return os.Rename(publicKeyfile+".tmp", publicKeyfile)
}
Developer ID: gogogocheng, Project: kubernetes, Lines: 28, Source file: master.go

Example 3: getAppArmorFS

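// getAppArmorFS scans /proc/mounts for the securityfs mount and uses util.FileExists
// to confirm that its apparmor subdirectory is actually present.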
func getAppArmorFS() (string, error) {
	mountsFile, err := os.Open("/proc/mounts")
	if err != nil {
		return "", fmt.Errorf("could not open /proc/mounts: %v", err)
	}
	defer mountsFile.Close()

	scanner := bufio.NewScanner(mountsFile)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) < 3 {
			// Unknown line format; skip it.
			continue
		}
		if fields[2] == "securityfs" {
			appArmorFS := path.Join(fields[1], "apparmor")
			if ok, err := util.FileExists(appArmorFS); !ok {
				msg := fmt.Sprintf("path %s does not exist", appArmorFS)
				if err != nil {
					return "", fmt.Errorf("%s: %v", msg, err)
				} else {
					return "", errors.New(msg)
				}
			} else {
				return appArmorFS, nil
			}
		}
	}
	if err := scanner.Err(); err != nil {
		return "", fmt.Errorf("error scanning mounts: %v", err)
	}

	return "", errors.New("securityfs not found")
}
Developer ID: invenfantasy, Project: kubernetes, Lines: 34, Source file: validate.go

Example 4: Run

// Run establishes tunnel loops and returns
func (c *SSHTunneler) Run(getAddresses AddressFunc) {
	if c.stopChan != nil {
		return
	}
	c.stopChan = make(chan struct{})

	// Save the address getter
	if getAddresses != nil {
		c.getAddresses = getAddresses
	}

	// Usernames are capped @ 32
	if len(c.SSHUser) > 32 {
		glog.Warning("SSH User is too long, truncating to 32 chars")
		c.SSHUser = c.SSHUser[0:32]
	}
	glog.Infof("Setting up proxy: %s %s", c.SSHUser, c.SSHKeyfile)

	// public keyfile is written last, so check for that.
	publicKeyFile := c.SSHKeyfile + ".pub"
	exists, err := util.FileExists(publicKeyFile)
	if err != nil {
		glog.Errorf("Error detecting if key exists: %v", err)
	} else if !exists {
		glog.Infof("Key doesn't exist, attempting to create")
		err := c.generateSSHKey(c.SSHUser, c.SSHKeyfile, publicKeyFile)
		if err != nil {
			glog.Errorf("Failed to create key pair: %v", err)
		}
	}
	c.tunnels = &util.SSHTunnelList{}
	c.setupSecureProxy(c.SSHUser, c.SSHKeyfile, publicKeyFile)
	c.lastSync = c.clock.Now().Unix()
}
Developer ID: johndmulhausen, Project: kubernetes, Lines: 35, Source file: tunneler.go

Example 5: validateSystemRequirements

// checks if the required cgroups subsystems are mounted.
// As of now, only 'cpu' and 'memory' are required.
// cpu quota is a soft requirement.
func validateSystemRequirements(mountUtil mount.Interface) (features, error) {
	const (
		cgroupMountType = "cgroup"
		localErr        = "system validation failed"
	)
	var (
		cpuMountPoint string
		f             features
	)
	mountPoints, err := mountUtil.List()
	if err != nil {
		return f, fmt.Errorf("%s - %v", localErr, err)
	}

	expectedCgroups := sets.NewString("cpu", "cpuacct", "cpuset", "memory")
	for _, mountPoint := range mountPoints {
		if mountPoint.Type == cgroupMountType {
			for _, opt := range mountPoint.Opts {
				if expectedCgroups.Has(opt) {
					expectedCgroups.Delete(opt)
				}
				if opt == "cpu" {
					cpuMountPoint = mountPoint.Path
				}
			}
		}
	}

	if expectedCgroups.Len() > 0 {
		return f, fmt.Errorf("%s - Following Cgroup subsystem not mounted: %v", localErr, expectedCgroups.List())
	}

	// Check if cpu quota is available.
	// CPU cgroup is required and so it is expected to be mounted at this point.
	periodExists, err := util.FileExists(path.Join(cpuMountPoint, "cpu.cfs_period_us"))
	if err != nil {
		glog.Errorf("failed to detect if CPU cgroup cpu.cfs_period_us is available - %v", err)
	}
	quotaExists, err := util.FileExists(path.Join(cpuMountPoint, "cpu.cfs_quota_us"))
	if err != nil {
		glog.Errorf("failed to detect if CPU cgroup cpu.cfs_quota_us is available - %v", err)
	}
	if quotaExists && periodExists {
		f.cpuHardcapping = true
	}
	return f, nil
}
Developer ID: XbinZh, Project: kubernetes, Lines: 50, Source file: container_manager_linux.go

Example 6: TearDownAt

// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *cinderVolumeCleaner) TearDownAt(dir string) error {
	notmnt, err := c.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		return err
	}

	exist, _ := util.FileExists(path.Join(dir, OpenStackCloudProviderTagFile))
	if exist {
		c.withOpenStackCP = false
	} else {
		c.withOpenStackCP = true
	}

	if notmnt {
		if !c.withOpenStackCP && c.isNoMountSupported {
			volumeID, err := ioutil.ReadFile(path.Join(dir, OpenStackCloudProviderTagFile))
			if err != nil {
				return err
			}

			c.pdName = string(volumeID)
			if err := c.manager.DetachDisk(c); err != nil {
				return err
			}
		}

		return os.RemoveAll(dir)
	}

	refs, err := mount.GetMountRefs(c.mounter, dir)
	if err != nil {
		return err
	}
	if err := c.mounter.Unmount(dir); err != nil {
		return err
	}
	glog.Infof("successfully unmounted: %s\n", dir)

	// If refCount is 1, then all bind mounts have been removed, and the
	// remaining reference is the global mount. It is safe to detach.
	if len(refs) == 1 {
		c.pdName = path.Base(refs[0])
		if err := c.manager.DetachDisk(c); err != nil {
			return err
		}
	}
	notmnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
		return mntErr
	}
	if notmnt {
		if err := os.Remove(dir); err != nil {
			return err
		}
	}
	return nil
}
Developer ID: kuenzaa, Project: hypernetes, Lines: 60, Source file: cinder.go

Example 7: init

// init initializes master.
func (m *Master) init(c *Config) {
	healthzChecks := []healthz.HealthzChecker{}
	m.clock = util.RealClock{}
	podStorage := podetcd.NewStorage(c.DatabaseStorage, c.KubeletClient)

	podTemplateStorage := podtemplateetcd.NewREST(c.DatabaseStorage)

	eventRegistry := event.NewEtcdRegistry(c.DatabaseStorage, uint64(c.EventTTL.Seconds()))
	limitRangeStorage := limitrangeetcd.NewStorage(c.DatabaseStorage)

	resourceQuotaStorage, resourceQuotaStatusStorage := resourcequotaetcd.NewStorage(c.DatabaseStorage)
	secretStorage := secretetcd.NewStorage(c.DatabaseStorage)
	serviceAccountStorage := serviceaccountetcd.NewStorage(c.DatabaseStorage)
	persistentVolumeStorage, persistentVolumeStatusStorage := pvetcd.NewStorage(c.DatabaseStorage)
	persistentVolumeClaimStorage, persistentVolumeClaimStatusStorage := pvcetcd.NewStorage(c.DatabaseStorage)

	namespaceStorage, namespaceStatusStorage, namespaceFinalizeStorage := namespaceetcd.NewStorage(c.DatabaseStorage)
	m.namespaceRegistry = namespace.NewRegistry(namespaceStorage)

	endpointsStorage := endpointsetcd.NewStorage(c.DatabaseStorage)
	m.endpointRegistry = endpoint.NewRegistry(endpointsStorage)

	nodeStorage, nodeStatusStorage := nodeetcd.NewStorage(c.DatabaseStorage, c.KubeletClient)
	m.nodeRegistry = minion.NewRegistry(nodeStorage)

	serviceStorage := serviceetcd.NewStorage(c.DatabaseStorage)
	m.serviceRegistry = service.NewRegistry(serviceStorage)

	var serviceClusterIPRegistry service.RangeRegistry
	serviceClusterIPAllocator := ipallocator.NewAllocatorCIDRRange(m.serviceClusterIPRange, func(max int, rangeSpec string) allocator.Interface {
		mem := allocator.NewAllocationMap(max, rangeSpec)
		etcd := etcdallocator.NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", c.DatabaseStorage)
		serviceClusterIPRegistry = etcd
		return etcd
	})
	m.serviceClusterIPAllocator = serviceClusterIPRegistry

	var serviceNodePortRegistry service.RangeRegistry
	serviceNodePortAllocator := portallocator.NewPortAllocatorCustom(m.serviceNodePortRange, func(max int, rangeSpec string) allocator.Interface {
		mem := allocator.NewAllocationMap(max, rangeSpec)
		etcd := etcdallocator.NewEtcd(mem, "/ranges/servicenodeports", "servicenodeportallocation", c.DatabaseStorage)
		serviceNodePortRegistry = etcd
		return etcd
	})
	m.serviceNodePortAllocator = serviceNodePortRegistry

	controllerStorage := controlleretcd.NewREST(c.DatabaseStorage)

	// TODO: Factor out the core API registration
	m.storage = map[string]rest.Storage{
		"pods":             podStorage.Pod,
		"pods/attach":      podStorage.Attach,
		"pods/status":      podStorage.Status,
		"pods/log":         podStorage.Log,
		"pods/exec":        podStorage.Exec,
		"pods/portforward": podStorage.PortForward,
		"pods/proxy":       podStorage.Proxy,
		"pods/binding":     podStorage.Binding,
		"bindings":         podStorage.Binding,

		"podTemplates": podTemplateStorage,

		"replicationControllers": controllerStorage,
		"services":               service.NewStorage(m.serviceRegistry, m.endpointRegistry, serviceClusterIPAllocator, serviceNodePortAllocator),
		"endpoints":              endpointsStorage,
		"nodes":                  nodeStorage,
		"nodes/status":           nodeStatusStorage,
		"events":                 event.NewStorage(eventRegistry),

		"limitRanges":                   limitRangeStorage,
		"resourceQuotas":                resourceQuotaStorage,
		"resourceQuotas/status":         resourceQuotaStatusStorage,
		"namespaces":                    namespaceStorage,
		"namespaces/status":             namespaceStatusStorage,
		"namespaces/finalize":           namespaceFinalizeStorage,
		"secrets":                       secretStorage,
		"serviceAccounts":               serviceAccountStorage,
		"persistentVolumes":             persistentVolumeStorage,
		"persistentVolumes/status":      persistentVolumeStatusStorage,
		"persistentVolumeClaims":        persistentVolumeClaimStorage,
		"persistentVolumeClaims/status": persistentVolumeClaimStatusStorage,

		"componentStatuses": componentstatus.NewStorage(func() map[string]apiserver.Server { return m.getServersToValidate(c) }),
	}

	// establish the node proxy dialer
	if len(c.SSHUser) > 0 {
		// Usernames are capped @ 32
		if len(c.SSHUser) > 32 {
			glog.Warning("SSH User is too long, truncating to 32 chars")
			c.SSHUser = c.SSHUser[0:32]
		}
		glog.Infof("Setting up proxy: %s %s", c.SSHUser, c.SSHKeyfile)

		// public keyfile is written last, so check for that.
		publicKeyFile := c.SSHKeyfile + ".pub"
		exists, err := util.FileExists(publicKeyFile)
		if err != nil {
			glog.Errorf("Error detecting if key exists: %v", err)
//......... part of the code omitted here .........
Developer ID: gogogocheng, Project: kubernetes, Lines: 101, Source file: master.go

Example 8: TearDownAt

// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *cinderVolumeUnmounter) TearDownAt(dir string) error {
	glog.V(5).Infof("Cinder TearDown of %s", dir)

	if _, err := os.Stat(dir); os.IsNotExist(err) {
		// A non-existent dir for TearDown is meaningless; it is possible this dir has already been cleaned up, so just skip the error for now.
		glog.Warningf("Volume directory: %v does not exist, it may have been cleaned up by a previous tear down task", dir)
		return nil
	}

	notmnt, err := c.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		glog.V(4).Infof("IsLikelyNotMountPoint check failed: %v", err)
		return err
	}

	exist, _ := util.FileExists(path.Join(dir, OpenStackCloudProviderTagFile))
	if exist {
		c.withOpenStackCP = false
	} else {
		c.withOpenStackCP = true
	}

	if notmnt {
		// Find Cinder volumeID to lock the right volume
		// TODO: refactor VolumePlugin.NewCleaner to get full volume.Spec just like
		// NewBuilder. We could then find volumeID there without probing MountRefs.
		if !c.withOpenStackCP && c.isNoMountSupported {
			volumeID, err := ioutil.ReadFile(path.Join(dir, OpenStackCloudProviderTagFile))
			if err != nil {
				return err
			}

			c.pdName = string(volumeID)
			if err := c.manager.DetachDisk(c); err != nil {
				return err
			}
		}

		return os.RemoveAll(dir)
	}

	// Find Cinder volumeID to lock the right volume
	// TODO: refactor VolumePlugin.NewUnmounter to get full volume.Spec just like
	// NewMounter. We could then find volumeID there without probing MountRefs.
	refs, err := mount.GetMountRefs(c.mounter, dir)
	if err != nil {
		glog.V(4).Infof("GetMountRefs failed: %v", err)
		return err
	}
	if len(refs) == 0 {
		glog.V(4).Infof("Directory %s is not mounted", dir)
		return fmt.Errorf("directory %s is not mounted", dir)
	}
	c.pdName = path.Base(refs[0])
	glog.V(4).Infof("Found volume %s mounted to %s", c.pdName, dir)

	// lock the volume (and thus wait for any concurrent SetUpAt to finish)
	c.plugin.volumeLocks.LockKey(c.pdName)
	defer c.plugin.volumeLocks.UnlockKey(c.pdName)

	// Reload list of references, there might be SetUpAt finished in the meantime
	refs, err = mount.GetMountRefs(c.mounter, dir)
	if err != nil {
		glog.V(4).Infof("GetMountRefs failed: %v", err)
		return err
	}
	if err := c.mounter.Unmount(dir); err != nil {
		glog.V(4).Infof("Unmount failed: %v", err)
		return err
	}
	glog.V(3).Infof("Successfully unmounted: %s\n", dir)

	notmnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
		return mntErr
	}
	if notmnt {
		if err := os.Remove(dir); err != nil {
			glog.V(4).Infof("Failed to remove directory after unmount: %v", err)
			return err
		}
	}
	return nil
}
Developer ID: hyperhq, Project: hypernetes, Lines: 87, Source file: cinder.go

Example 9: TearDownAt

// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *cinderVolumeCleaner) TearDownAt(dir string) error {
	glog.V(5).Infof("Cinder TearDown of %s", dir)
	notmnt, err := c.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		glog.V(4).Infof("IsLikelyNotMountPoint check failed: %v", err)
		return err
	}

	exist, _ := util.FileExists(path.Join(dir, OpenStackCloudProviderTagFile))
	if exist {
		c.withOpenStackCP = false
	} else {
		c.withOpenStackCP = true
	}

	if notmnt {
		// Find Cinder volumeID to lock the right volume
		// TODO: refactor VolumePlugin.NewCleaner to get full volume.Spec just like
		// NewBuilder. We could then find volumeID there without probing MountRefs.

		if !c.withOpenStackCP && c.isNoMountSupported {
			volumeID, err := ioutil.ReadFile(path.Join(dir, OpenStackCloudProviderTagFile))
			if err != nil {
				return err
			}

			c.pdName = string(volumeID)
			if err := c.manager.DetachDisk(c); err != nil {
				return err
			}
		}

		return os.RemoveAll(dir)
	}

	refs, err := mount.GetMountRefs(c.mounter, dir)
	if err != nil {
		glog.V(4).Infof("GetMountRefs failed: %v", err)
		return err
	}
	if len(refs) == 0 {
		glog.V(4).Infof("Directory %s is not mounted", dir)
		return fmt.Errorf("directory %s is not mounted", dir)
	}
	c.pdName = path.Base(refs[0])
	glog.V(4).Infof("Found volume %s mounted to %s", c.pdName, dir)

	// lock the volume (and thus wait for any concurrent SetUpAt to finish)
	c.plugin.volumeLocks.LockKey(c.pdName)
	defer c.plugin.volumeLocks.UnlockKey(c.pdName)

	// Reload list of references, there might be SetUpAt finished in the meantime
	refs, err = mount.GetMountRefs(c.mounter, dir)
	if err != nil {
		glog.V(4).Infof("GetMountRefs failed: %v", err)
		return err
	}
	if err := c.mounter.Unmount(dir); err != nil {
		glog.V(4).Infof("Unmount failed: %v", err)
		return err
	}
	glog.V(3).Infof("Successfully unmounted: %s\n", dir)

	// If refCount is 1, then all bind mounts have been removed, and the
	// remaining reference is the global mount. It is safe to detach.
	if len(refs) == 1 {
		if err := c.manager.DetachDisk(c); err != nil {
			glog.V(4).Infof("DetachDisk failed: %v", err)
			return err
		}
		glog.V(3).Infof("Volume %s detached", c.pdName)
	}
	notmnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
		return mntErr
	}
	if notmnt {
		if err := os.Remove(dir); err != nil {
			glog.V(4).Infof("Failed to remove directory after unmount: %v", err)
			return err
		}
	}
	return nil
}
Developer ID: thed00de, Project: hypernetes, Lines: 87, Source file: cinder.go


Note: The k8s.io/kubernetes/pkg/util.FileExists function examples in this article were compiled by 純淨天空 from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's License. Please do not reproduce without permission.