

Golang framework.ProviderIs Function Code Examples

This article collects typical usage examples of the Go function ProviderIs from the k8s.io/kubernetes/test/e2e/framework package. If you are wondering what ProviderIs does, how to call it, or what real-world call sites look like, the curated examples below should help.


Eight code examples of the ProviderIs function are shown below, listed roughly in order of popularity.
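Across the examples below, ProviderIs is called with one or more provider names (such as "gce", "gke", "aws", "kubemark", or "local") and returns true when the test run's configured provider matches any of them. As a point of reference, here is a minimal, self-contained sketch of that pattern; it is not the framework's actual source, and testProvider merely stands in for framework.TestContext.Provider:

package main

import (
	"fmt"
	"strings"
)

// testProvider stands in for framework.TestContext.Provider, which the e2e
// framework fills in from the --provider test flag. The value here is only
// for illustration.
var testProvider = "gce"

// providerIs mirrors the apparent contract of framework.ProviderIs: report
// whether the configured provider matches any of the given names,
// ignoring case.
func providerIs(providers ...string) bool {
	for _, p := range providers {
		if strings.EqualFold(p, testProvider) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(providerIs("gce", "gke")) // true
	fmt.Println(providerIs("aws"))        // false
}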

Example 1: testLoadBalancerReachable

// testLoadBalancerReachable reports whether the load balancer ingress answers
// on the given port, giving AWS load balancers extra time to become reachable.
func testLoadBalancerReachable(ingress api.LoadBalancerIngress, port int) bool {
	loadBalancerLagTimeout := loadBalancerLagTimeoutDefault
	if framework.ProviderIs("aws") {
		loadBalancerLagTimeout = loadBalancerLagTimeoutAWS
	}
	return testLoadBalancerReachableInTime(ingress, port, loadBalancerLagTimeout)
}
Author: jeremyeder | Project: kubernetes | Lines: 7 | Source file: cluster_upgrade.go
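The two timeout constants used above are defined elsewhere in the e2e suite and do not appear on this page. A self-contained sketch of the same provider-dependent timeout pattern, with purely illustrative values rather than the suite's real ones, might look like this:

package main

import (
	"fmt"
	"time"
)

// Illustrative stand-ins for the suite's loadBalancerLagTimeoutDefault and
// loadBalancerLagTimeoutAWS constants; the real values live in the e2e code.
const (
	lagTimeoutDefault = 2 * time.Minute
	lagTimeoutAWS     = 10 * time.Minute
)

// lagTimeoutFor picks a longer settle time for AWS load balancers, which the
// example above treats as slower to become reachable than other providers.
func lagTimeoutFor(provider string) time.Duration {
	if provider == "aws" {
		return lagTimeoutAWS
	}
	return lagTimeoutDefault
}

func main() {
	fmt.Println(lagTimeoutFor("aws")) // 10m0s
	fmt.Println(lagTimeoutFor("gce")) // 2m0s
}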

Example 2: NewRestartConfig

// NewRestartConfig creates a restartDaemonConfig for the given node and daemon.
func NewRestartConfig(nodeName, daemonName string, healthzPort int, pollInterval, pollTimeout time.Duration) *restartDaemonConfig {
	if !framework.ProviderIs("gce") {
		framework.Logf("WARNING: SSH through the restart config might not work on %s", framework.TestContext.Provider)
	}
	return &restartDaemonConfig{
		nodeName:     nodeName,
		daemonName:   daemonName,
		healthzPort:  healthzPort,
		pollInterval: pollInterval,
		pollTimeout:  pollTimeout,
	}
}
Author: vikaschoudhary16 | Project: kubernetes | Lines: 13 | Source file: daemon_restart.go

Example 3: newStorageClass

// newStorageClass returns a StorageClass named "fast" whose provisioner
// plugin is chosen to match the cloud provider the tests run against.
func newStorageClass() *storage.StorageClass {
	var pluginName string

	switch {
	case framework.ProviderIs("gke"), framework.ProviderIs("gce"):
		pluginName = "kubernetes.io/gce-pd"
	case framework.ProviderIs("aws"):
		pluginName = "kubernetes.io/aws-ebs"
	case framework.ProviderIs("openstack"):
		pluginName = "kubernetes.io/cinder"
	}

	return &storage.StorageClass{
		TypeMeta: unversioned.TypeMeta{
			Kind: "StorageClass",
		},
		ObjectMeta: api.ObjectMeta{
			Name: "fast",
		},
		Provisioner: pluginName,
	}
}
Author: eljefedelrodeodeljefe | Project: kubernetes | Lines: 22 | Source file: volume_provisioning.go
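The heart of this example is a provider-to-provisioner mapping. A stand-alone sketch of that selection logic, reusing only the plugin names shown above and defaulting to an empty string for unknown providers, could be written as:

package main

import "fmt"

// provisionerFor returns the in-tree provisioner plugin name that the example
// above uses for a given cloud provider, or "" when no dynamic provisioner is
// known for that provider.
func provisionerFor(provider string) string {
	switch provider {
	case "gce", "gke":
		return "kubernetes.io/gce-pd"
	case "aws":
		return "kubernetes.io/aws-ebs"
	case "openstack":
		return "kubernetes.io/cinder"
	}
	return ""
}

func main() {
	for _, p := range []string{"gce", "aws", "openstack", "vsphere"} {
		fmt.Printf("%-10s -> %q\n", p, provisionerFor(p))
	}
}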

Example 4:

var _ = framework.KubeDescribe("DNS", func() {
	f := framework.NewDefaultFramework("dns")

	It("should provide DNS for the cluster [Conformance]", func() {
		verifyDNSPodIsRunning(f)

		// All the names we need to be able to resolve.
		// TODO: Spin up a separate test service and test that dns works for that service.
		namesToResolve := []string{
			"kubernetes.default",
			"kubernetes.default.svc",
			"kubernetes.default.svc.cluster.local",
			"google.com",
		}
		// Added due to #8512. This is critical for GCE and GKE deployments.
		if framework.ProviderIs("gce", "gke") {
			namesToResolve = append(namesToResolve, "metadata")
		}

		wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, "wheezy", f.Namespace.Name)
		jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, "jessie", f.Namespace.Name)
		By("Running these commands on wheezy:" + wheezyProbeCmd + "\n")
		By("Running these commands on jessie:" + jessieProbeCmd + "\n")

		// Run a pod which probes DNS and exposes the results by HTTP.
		By("creating a pod to probe DNS")
		pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
		validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
	})

	It("should provide DNS for services [Conformance]", func() {
Author: jeremyeder | Project: kubernetes | Lines: 31 | Source file: dns.go
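The DNS test builds its name list conditionally: only GCE and GKE clusters are expected to resolve the cloud metadata server. The following self-contained sketch reproduces that conditional list building; the probe command it assembles is a deliberately simplified stand-in for what createProbeCommand generates:

package main

import (
	"fmt"
	"strings"
)

// provider stands in for framework.TestContext.Provider.
var provider = "gce"

func main() {
	namesToResolve := []string{
		"kubernetes.default",
		"kubernetes.default.svc",
		"kubernetes.default.svc.cluster.local",
		"google.com",
	}
	// The metadata server name only resolves on GCE/GKE, so it is appended
	// conditionally, just as in the example above.
	if provider == "gce" || provider == "gke" {
		namesToResolve = append(namesToResolve, "metadata")
	}

	// A simplified stand-in for createProbeCommand: one nslookup per name.
	var probes []string
	for _, name := range namesToResolve {
		probes = append(probes, fmt.Sprintf("nslookup %s", name))
	}
	fmt.Println(strings.Join(probes, " && "))
}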

Example 5: density30AddonResourceVerifier

// density30AddonResourceVerifier returns per-component resource constraints,
// scaling the apiserver, controller-manager and scheduler limits by provider
// and cluster size.
func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
	var apiserverMem uint64
	var controllerMem uint64
	var schedulerMem uint64
	apiserverCPU := math.MaxFloat32
	apiserverMem = math.MaxUint64
	controllerCPU := math.MaxFloat32
	controllerMem = math.MaxUint64
	schedulerCPU := math.MaxFloat32
	schedulerMem = math.MaxUint64
	if framework.ProviderIs("kubemark") {
		if numNodes <= 5 {
			apiserverCPU = 0.15
			apiserverMem = 150 * (1024 * 1024)
			controllerCPU = 0.1
			controllerMem = 100 * (1024 * 1024)
			schedulerCPU = 0.05
			schedulerMem = 50 * (1024 * 1024)
		} else if numNodes <= 100 {
			apiserverCPU = 1.5
			apiserverMem = 1500 * (1024 * 1024)
			controllerCPU = 0.75
			controllerMem = 750 * (1024 * 1024)
			schedulerCPU = 0.75
			schedulerMem = 500 * (1024 * 1024)
		} else if numNodes <= 500 {
			apiserverCPU = 2.25
			apiserverMem = 2500 * (1024 * 1024)
			controllerCPU = 1.0
			controllerMem = 1100 * (1024 * 1024)
			schedulerCPU = 0.8
			schedulerMem = 500 * (1024 * 1024)
		} else if numNodes <= 1000 {
			apiserverCPU = 4
			apiserverMem = 4000 * (1024 * 1024)
			controllerCPU = 3
			controllerMem = 2000 * (1024 * 1024)
			schedulerCPU = 1.5
			schedulerMem = 750 * (1024 * 1024)
		}
	} else {
		if numNodes <= 100 {
			apiserverCPU = 1.5
			apiserverMem = 1300 * (1024 * 1024)
			controllerCPU = 0.5
			controllerMem = 300 * (1024 * 1024)
			schedulerCPU = 0.4
			schedulerMem = 150 * (1024 * 1024)
		}
	}

	constraints := make(map[string]framework.ResourceConstraint)
	constraints["fluentd-elasticsearch"] = framework.ResourceConstraint{
		CPUConstraint:    0.2,
		MemoryConstraint: 250 * (1024 * 1024),
	}
	constraints["elasticsearch-logging"] = framework.ResourceConstraint{
		CPUConstraint: 2,
		// TODO: bring it down to 750MB again, when we lower Kubelet verbosity level. I.e. revert #19164
		MemoryConstraint: 5000 * (1024 * 1024),
	}
	constraints["heapster"] = framework.ResourceConstraint{
		CPUConstraint:    2,
		MemoryConstraint: 1800 * (1024 * 1024),
	}
	constraints["kibana-logging"] = framework.ResourceConstraint{
		CPUConstraint:    0.2,
		MemoryConstraint: 100 * (1024 * 1024),
	}
	constraints["kube-proxy"] = framework.ResourceConstraint{
		CPUConstraint:    0.05,
		MemoryConstraint: 20 * (1024 * 1024),
	}
	constraints["l7-lb-controller"] = framework.ResourceConstraint{
		CPUConstraint:    0.05,
		MemoryConstraint: 20 * (1024 * 1024),
	}
	constraints["influxdb"] = framework.ResourceConstraint{
		CPUConstraint:    2,
		MemoryConstraint: 500 * (1024 * 1024),
	}
	constraints["kube-apiserver"] = framework.ResourceConstraint{
		CPUConstraint:    apiserverCPU,
		MemoryConstraint: apiserverMem,
	}
	constraints["kube-controller-manager"] = framework.ResourceConstraint{
		CPUConstraint:    controllerCPU,
		MemoryConstraint: controllerMem,
	}
	constraints["kube-scheduler"] = framework.ResourceConstraint{
		CPUConstraint:    schedulerCPU,
		MemoryConstraint: schedulerMem,
	}
	return constraints
}
Author: CodeJuan | Project: kubernetes | Lines: 95 | Source file: density.go

Example 6: density30AddonResourceVerifier

// density30AddonResourceVerifier returns per-component resource constraints,
// scaling the apiserver, controller-manager and scheduler limits by provider
// and cluster size.
func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
	var apiserverMem uint64
	var controllerMem uint64
	var schedulerMem uint64
	apiserverCPU := math.MaxFloat32
	apiserverMem = math.MaxUint64
	controllerCPU := math.MaxFloat32
	controllerMem = math.MaxUint64
	schedulerCPU := math.MaxFloat32
	schedulerMem = math.MaxUint64
	framework.Logf("Setting resource constraings for provider: %s", framework.TestContext.Provider)
	if framework.ProviderIs("kubemark") {
		if numNodes <= 5 {
			apiserverCPU = 0.35
			apiserverMem = 150 * (1024 * 1024)
			controllerCPU = 0.1
			controllerMem = 100 * (1024 * 1024)
			schedulerCPU = 0.05
			schedulerMem = 50 * (1024 * 1024)
		} else if numNodes <= 100 {
			apiserverCPU = 1.5
			apiserverMem = 1500 * (1024 * 1024)
			controllerCPU = 0.75
			controllerMem = 750 * (1024 * 1024)
			schedulerCPU = 0.75
			schedulerMem = 500 * (1024 * 1024)
		} else if numNodes <= 500 {
			apiserverCPU = 2.5
			apiserverMem = 3400 * (1024 * 1024)
			controllerCPU = 1.3
			controllerMem = 1100 * (1024 * 1024)
			schedulerCPU = 1.5
			schedulerMem = 500 * (1024 * 1024)
		} else if numNodes <= 1000 {
			apiserverCPU = 4
			apiserverMem = 4000 * (1024 * 1024)
			controllerCPU = 3
			controllerMem = 2000 * (1024 * 1024)
			schedulerCPU = 1.5
			schedulerMem = 750 * (1024 * 1024)
		}
	} else {
		if numNodes <= 100 {
			// TODO: Investigate higher apiserver consumption and
			// potentially revert to 1.5cpu and 1.3GB - see #30871
			apiserverCPU = 1.8
			apiserverMem = 2200 * (1024 * 1024)
			controllerCPU = 0.5
			controllerMem = 300 * (1024 * 1024)
			schedulerCPU = 0.4
			schedulerMem = 150 * (1024 * 1024)
		}
	}

	constraints := make(map[string]framework.ResourceConstraint)
	constraints["fluentd-elasticsearch"] = framework.ResourceConstraint{
		CPUConstraint:    0.2,
		MemoryConstraint: 250 * (1024 * 1024),
	}
	constraints["elasticsearch-logging"] = framework.ResourceConstraint{
		CPUConstraint: 2,
		// TODO: bring it down to 750MB again, when we lower Kubelet verbosity level. I.e. revert #19164
		MemoryConstraint: 5000 * (1024 * 1024),
	}
	constraints["heapster"] = framework.ResourceConstraint{
		CPUConstraint:    2,
		MemoryConstraint: 1800 * (1024 * 1024),
	}
	constraints["kibana-logging"] = framework.ResourceConstraint{
		CPUConstraint:    0.2,
		MemoryConstraint: 100 * (1024 * 1024),
	}
	constraints["kube-proxy"] = framework.ResourceConstraint{
		CPUConstraint:    0.1,
		MemoryConstraint: 20 * (1024 * 1024),
	}
	constraints["l7-lb-controller"] = framework.ResourceConstraint{
		CPUConstraint:    0.15,
		MemoryConstraint: 60 * (1024 * 1024),
	}
	constraints["influxdb"] = framework.ResourceConstraint{
		CPUConstraint:    2,
		MemoryConstraint: 500 * (1024 * 1024),
	}
	constraints["kube-apiserver"] = framework.ResourceConstraint{
		CPUConstraint:    apiserverCPU,
		MemoryConstraint: apiserverMem,
	}
	constraints["kube-controller-manager"] = framework.ResourceConstraint{
		CPUConstraint:    controllerCPU,
		MemoryConstraint: controllerMem,
	}
	constraints["kube-scheduler"] = framework.ResourceConstraint{
		CPUConstraint:    schedulerCPU,
		MemoryConstraint: schedulerMem,
	}
	return constraints
}
Author: vikaschoudhary16 | Project: kubernetes | Lines: 98 | Source file: density.go
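Examples 5 and 6 only construct the constraint map; the verifier that consumes it lives elsewhere in density.go and is not shown here. As a rough illustration of how such a map could be checked against measured usage, here is a self-contained sketch in which the ResourceConstraint fields mirror the ones used above but the measurements are invented:

package main

import "fmt"

// ResourceConstraint mirrors the two fields the examples above populate:
// a CPU ceiling in cores and a memory ceiling in bytes.
type ResourceConstraint struct {
	CPUConstraint    float64
	MemoryConstraint uint64
}

// usage is a hypothetical measurement for one component.
type usage struct {
	cpu float64
	mem uint64
}

func main() {
	constraints := map[string]ResourceConstraint{
		"kube-proxy": {CPUConstraint: 0.1, MemoryConstraint: 20 * 1024 * 1024},
	}
	measured := map[string]usage{
		"kube-proxy": {cpu: 0.12, mem: 18 * 1024 * 1024},
	}

	for name, c := range constraints {
		u, ok := measured[name]
		if !ok {
			continue
		}
		if u.cpu > c.CPUConstraint {
			fmt.Printf("%s exceeded CPU constraint: %.2f > %.2f\n", name, u.cpu, c.CPUConstraint)
		}
		if u.mem > c.MemoryConstraint {
			fmt.Printf("%s exceeded memory constraint: %d > %d\n", name, u.mem, c.MemoryConstraint)
		}
	}
}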

Example 7:

// GCE Quota requirements: 3 pds, one per pet manifest declared above.
// GCE Api requirements: nodes and master need storage r/w permissions.
var _ = framework.KubeDescribe("PetSet [Slow] [Feature:PetSet]", func() {
	options := framework.FrameworkOptions{
		GroupVersion: &unversioned.GroupVersion{Group: apps.GroupName, Version: "v1alpha1"},
	}
	f := framework.NewFramework("petset", options, nil)
	var ns string
	var c *client.Client

	BeforeEach(func() {
		// PetSet is in alpha, so it's disabled on some platforms. We skip this
		// test if a resource get fails on non-GCE platforms.
		// In theory, tests that restart pets should pass on any platform with a
		// dynamic volume provisioner.
		if !framework.ProviderIs("gce") {
			framework.SkipIfMissingResource(f.ClientPool, unversioned.GroupVersionResource{Group: apps.GroupName, Version: "v1alpha1", Resource: "petsets"}, f.Namespace.Name)
		}

		c = f.Client
		ns = f.Namespace.Name
	})

	framework.KubeDescribe("Basic PetSet functionality", func() {
		psName := "pet"
		labels := map[string]string{
			"foo": "bar",
			"baz": "blah",
		}
		headlessSvcName := "test"
Author: olegshaldybin | Project: kubernetes | Lines: 30 | Source file: petset.go
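The BeforeEach above gates an alpha API behind a provider check: on GCE the petsets resource is assumed to exist, while on other providers the test is skipped if the resource is missing. The same provider-gated skip pattern can be sketched with the standard testing package (placed in a _test.go file); both environment variables here are made-up stand-ins, not anything the framework defines:

package e2esketch

import (
	"os"
	"testing"
)

// TestPetSetBasics sketches the provider-gated skip pattern from the example
// above. KUBE_PROVIDER stands in for framework.TestContext.Provider, and
// PETSET_SUPPORTED stands in for the framework's resource-discovery check.
func TestPetSetBasics(t *testing.T) {
	provider := os.Getenv("KUBE_PROVIDER")
	if provider != "gce" && os.Getenv("PETSET_SUPPORTED") == "" {
		t.Skipf("petsets are alpha and not available on provider %q; skipping", provider)
	}
	// ... the actual PetSet checks would go here ...
}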

Example 8:

		// result in huge "Endpoint" objects and all underlying pods read them
		// periodically. Moreover, all KubeProxies watch all of them.
		// Thus we limit the maximum number of pods under a service.
		//
		// TODO: Remove this limitation once services, endpoints and data flows
		// between nodes and master are better optimized.
		maxNodeCount := 250
		if len(nodes.Items) > maxNodeCount {
			nodes.Items = nodes.Items[:maxNodeCount]
		}

		if len(nodes.Items) == 1 {
			// in general, the test requires two nodes. But for local development, often a one node cluster
			// is created, for simplicity and speed. (see issue #10012). We permit one-node test
			// only in some cases
			if !framework.ProviderIs("local") {
				framework.Failf("The test requires two Ready nodes on %s, but found just one.", framework.TestContext.Provider)
			}
			framework.Logf("Only one ready node is detected. The test has limited scope in such setting. " +
				"Rerun it with at least two nodes to get complete coverage.")
		}

		podNames := LaunchNetTestPodPerNode(f, nodes, svcname)

		// Clean up the pods
		defer func() {
			By("Cleaning up the webserver pods")
			for _, podName := range podNames {
				if err = f.Client.Pods(f.Namespace.Name).Delete(podName, nil); err != nil {
					framework.Logf("Failed to delete pod %s: %v", podName, err)
				}
Author: juanluisvaladas | Project: origin | Lines: 31 | Source file: networking.go
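This snippet caps the node list at 250 entries and insists on at least two ready nodes unless the provider is "local". A compact stand-alone sketch of that gating logic, with a faked one-node cluster in place of the real node list, looks like this:

package main

import "fmt"

func main() {
	const maxNodeCount = 250
	provider := "local" // stand-in for framework.TestContext.Provider

	nodes := []string{"node-0"} // a faked one-node cluster

	// Cap very large clusters so the per-service endpoint objects stay small.
	if len(nodes) > maxNodeCount {
		nodes = nodes[:maxNodeCount]
	}

	if len(nodes) == 1 {
		// The networking test normally needs two ready nodes; a single-node
		// run is tolerated only on the "local" provider.
		if provider != "local" {
			fmt.Printf("The test requires two Ready nodes on %s, but found just one.\n", provider)
			return
		}
		fmt.Println("Only one ready node detected; coverage will be limited.")
	}
}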


Note: The k8s.io/kubernetes/test/e2e/framework.ProviderIs examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are excerpted from open-source projects contributed by their authors; copyright remains with the original authors, and any use or redistribution should follow the License of the corresponding project. Do not reproduce without permission.