

Golang framework.DeleteRCAndPods Function Code Examples

This article collects typical usage examples of the DeleteRCAndPods function from the Golang package k8s.io/kubernetes/test/e2e/framework. If you are unsure what DeleteRCAndPods does, how to call it, or what real usage looks like, the curated code examples below should help.


The sections below present 15 code examples of the DeleteRCAndPods function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
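Before the numbered examples, here is a minimal sketch of the typical create-then-clean-up pattern. It is not taken from any example below; it assumes the e2e framework and testutils packages are imported as they are in those examples, and that the function name createAndCleanupRC is hypothetical. An RC is started with framework.RunRC and torn down with framework.DeleteRCAndPods, which deletes the controller and waits for its pods to be removed. Note that the argument list varies between Kubernetes releases: older versions take only (client, namespace, name), while the versions used by most examples below also take the internal clientset.

func createAndCleanupRC(f *framework.Framework, rcName string, replicas int) error {
	// Start a replication controller running pause containers and wait
	// until all replicas are running.
	if err := framework.RunRC(testutils.RCConfig{
		Client:         f.ClientSet,
		InternalClient: f.InternalClientset,
		Name:           rcName,
		Namespace:      f.Namespace.Name,
		Image:          framework.GetPauseImageName(f.ClientSet),
		Replicas:       replicas,
	}); err != nil {
		return err
	}
	// Delete the RC and wait until its pods are gone.
	return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
}

In the examples that follow, this call usually appears either in an explicit cleanup step (often wrapped in framework.ExpectNoError) or in a deferred function so the RC is removed even if the test fails.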

Example 1: CleanUp

func (rc *ResourceConsumer) CleanUp() {
	By(fmt.Sprintf("Removing consuming RC %s", rc.name))
	close(rc.stopCPU)
	close(rc.stopMem)
	close(rc.stopCustomMetric)
	// Wait some time to ensure all child goroutines are finished.
	time.Sleep(10 * time.Second)
	framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.ClientSet, rc.framework.InternalClientset, rc.framework.Namespace.Name, rc.name))
	framework.ExpectNoError(rc.framework.ClientSet.Core().Services(rc.framework.Namespace.Name).Delete(rc.name, nil))
	framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.ClientSet, rc.framework.InternalClientset, rc.framework.Namespace.Name, rc.controllerName))
	framework.ExpectNoError(rc.framework.ClientSet.Core().Services(rc.framework.Namespace.Name).Delete(rc.controllerName, nil))
}
Developer: jonboulle, Project: kubernetes, Lines of code: 12, Source: autoscaling_utils.go

Example 2: runResourceTrackingTest

func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames sets.String, rm *framework.ResourceMonitor,
	expectedCPU map[string]map[float64]float64, expectedMemory framework.ResourceUsagePerContainer) {
	numNodes := nodeNames.Len()
	totalPods := podsPerNode * numNodes
	By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
	rcName := fmt.Sprintf("resource%d-%s", totalPods, string(uuid.NewUUID()))

	// TODO: Use a more realistic workload
	Expect(framework.RunRC(testutils.RCConfig{
		Client:         f.ClientSet,
		InternalClient: f.InternalClientset,
		Name:           rcName,
		Namespace:      f.Namespace.Name,
		Image:          framework.GetPauseImageName(f.ClientSet),
		Replicas:       totalPods,
	})).NotTo(HaveOccurred())

	// Log once and flush the stats.
	rm.LogLatest()
	rm.Reset()

	By("Start monitoring resource usage")
	// Periodically dump the cpu summary until the deadline is met.
	// Note that without calling framework.ResourceMonitor.Reset(), the stats
	// would occupy increasingly more memory. This should be fine
	// for the current test duration, but we should reclaim the
	// entries if we plan to monitor longer (e.g., 8 hours).
	deadline := time.Now().Add(monitoringTime)
	for time.Now().Before(deadline) {
		timeLeft := deadline.Sub(time.Now())
		framework.Logf("Still running...%v left", timeLeft)
		if timeLeft < reportingPeriod {
			time.Sleep(timeLeft)
		} else {
			time.Sleep(reportingPeriod)
		}
		logPodsOnNodes(f.ClientSet, nodeNames.List())
	}

	By("Reporting overall resource usage")
	logPodsOnNodes(f.ClientSet, nodeNames.List())
	usageSummary, err := rm.GetLatest()
	Expect(err).NotTo(HaveOccurred())
	// TODO(random-liu): Remove the original log when we migrate to new perfdash
	framework.Logf("%s", rm.FormatResourceUsage(usageSummary))
	// Log perf result
	framework.PrintPerfData(framework.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
	verifyMemoryLimits(f.ClientSet, expectedMemory, usageSummary)

	cpuSummary := rm.GetCPUSummary()
	framework.Logf("%s", rm.FormatCPUSummary(cpuSummary))
	// Log perf result
	framework.PrintPerfData(framework.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
	verifyCPULimits(expectedCPU, cpuSummary)

	By("Deleting the RC")
	framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
}
Developer: jonboulle, Project: kubernetes, Lines of code: 58, Source: kubelet_perf.go

Example 3: deleteRC

func deleteRC(wg *sync.WaitGroup, config *testutils.RCConfig, deletingTime time.Duration) {
	defer GinkgoRecover()
	defer wg.Done()

	sleepUpTo(deletingTime)
	if framework.TestContext.GarbageCollectorEnabled {
		framework.ExpectNoError(framework.DeleteRCAndWaitForGC(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
	} else {
		framework.ExpectNoError(framework.DeleteRCAndPods(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
	}
}
Developer: jumpkick, Project: kubernetes, Lines of code: 11, Source: load.go

Example 4: SpreadRCOrFail

// Check that the pods comprising a replication controller get spread evenly across available zones
func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
	name := "ubelite-spread-rc-" + string(uuid.NewUUID())
	By(fmt.Sprintf("Creating replication controller %s", name))
	controller, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(&v1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: f.Namespace.Name,
			Name:      name,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &replicaCount,
			Selector: map[string]string{
				"name": name,
			},
			Template: &v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  name,
							Image: image,
							Ports: []v1.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	// Cleanup the replication controller when we are done.
	defer func() {
		// Resize the replication controller to zero to get rid of pods.
		if err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, controller.Name); err != nil {
			framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
		}
	}()
	// List the pods, making sure we observe all the replicas.
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicaCount)
	Expect(err).NotTo(HaveOccurred())

	// Wait for all of them to be scheduled
	By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled.  Selector: %v", replicaCount, name, selector))
	pods, err = framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
	Expect(err).NotTo(HaveOccurred())

	// Now make sure they're spread across zones
	zoneNames, err := getZoneNames(f.ClientSet)
	Expect(err).NotTo(HaveOccurred())
	Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames)).To(Equal(true))
}
Developer: kubernetes, Project: kubernetes, Lines of code: 53, Source: ubernetes_lite.go

Example 5: cleanupDensityTest

func cleanupDensityTest(dtc DensityTestConfig) {
	defer GinkgoRecover()
	By("Deleting ReplicationController")
	// We explicitly delete all pods to have API calls necessary for deletion accounted in metrics.
	for i := range dtc.Configs {
		rcName := dtc.Configs[i].Name
		rc, err := dtc.Client.ReplicationControllers(dtc.Namespace).Get(rcName)
		if err == nil && rc.Spec.Replicas != 0 {
			if framework.TestContext.GarbageCollectorEnabled {
				By("Cleaning up only the replication controller, garbage collector will clean up the pods")
				err := framework.DeleteRCAndWaitForGC(dtc.Client, dtc.Namespace, rcName)
				framework.ExpectNoError(err)
			} else {
				By("Cleaning up the replication controller and pods")
				err := framework.DeleteRCAndPods(dtc.Client, dtc.ClientSet, dtc.Namespace, rcName)
				framework.ExpectNoError(err)
			}
		}
	}
}
Developer: vikaschoudhary16, Project: kubernetes, Lines of code: 20, Source: density.go

Example 6:

	var totalMillicores int

	BeforeEach(func() {
		ns = f.Namespace.Name
		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
		nodeCount := len(nodes.Items)
		Expect(nodeCount).NotTo(BeZero())

		cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]
		totalMillicores = int((&cpu).MilliValue()) * nodeCount
	})

	It("should ensure that critical pod is scheduled in case there is no resources available", func() {
		By("reserving all available cpu")
		err := reserveAllCpu(f, "reserve-all-cpu", totalMillicores)
		defer framework.DeleteRCAndPods(f.Client, ns, "reserve-all-cpu")
		framework.ExpectNoError(err)

		By("creating a new instance of DNS and waiting for DNS to be scheduled")
		label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"}))
		listOpts := api.ListOptions{LabelSelector: label}
		rcs, err := f.Client.ReplicationControllers(api.NamespaceSystem).List(listOpts)
		framework.ExpectNoError(err)
		Expect(len(rcs.Items)).Should(Equal(1))

		rc := rcs.Items[0]
		replicas := uint(rc.Spec.Replicas)

		err = framework.ScaleRC(f.Client, api.NamespaceSystem, rc.Name, replicas+1, true)
		defer framework.ExpectNoError(framework.ScaleRC(f.Client, api.NamespaceSystem, rc.Name, replicas, true))
		framework.ExpectNoError(err)
Developer: vmware, Project: kubernetes, Lines of code: 31, Source: rescheduler.go

Example 7:

}

var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
	var c *client.Client
	var nodeList *api.NodeList
	var systemPodsNo int
	var totalPodCapacity int64
	var RCName string
	var ns string
	ignoreLabels := framework.ImagePullerLabels

	AfterEach(func() {
		rc, err := c.ReplicationControllers(ns).Get(RCName)
		if err == nil && rc.Spec.Replicas != 0 {
			By("Cleaning up the replication controller")
			err := framework.DeleteRCAndPods(c, ns, RCName)
			framework.ExpectNoError(err)
		}
	})

	f := framework.NewDefaultFramework("sched-pred")

	BeforeEach(func() {
		c = f.Client
		ns = f.Namespace.Name
		nodeList = &api.NodeList{}

		masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(c)

		err := framework.CheckTestingNSDeletedExcept(c, ns)
		framework.ExpectNoError(err)
Developer: invenfantasy, Project: kubernetes, Lines of code: 31, Source: scheduler_predicates.go

Example 8:

					Image:          framework.GetPauseImageName(f.ClientSet),
					Replicas:       totalPods,
					NodeSelector:   nodeLabels,
				})).NotTo(HaveOccurred())
				// Perform a sanity check so that we know all desired pods are
				// running on the nodes according to kubelet. The timeout is set to
				// only 30 seconds here because framework.RunRC already waited for all pods to
				// transition to the running status.
				Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, f.Namespace.Name, totalPods,
					time.Second*30)).NotTo(HaveOccurred())
				if resourceMonitor != nil {
					resourceMonitor.LogLatest()
				}

				By("Deleting the RC")
				framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
				// Check that the pods really are gone by querying /runningpods on the
				// node. The /runningpods handler checks the container runtime (or its
				// cache) and  returns a list of running pods. Some possible causes of
				// failures are:
				//   - kubelet deadlock
				//   - a bug in graceful termination (if it is enabled)
				//   - docker slow to delete pods (or resource problems causing slowness)
				start := time.Now()
				Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, f.Namespace.Name, 0,
					itArg.timeout)).NotTo(HaveOccurred())
				framework.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
					time.Since(start))
				if resourceMonitor != nil {
					resourceMonitor.LogCPUSummary()
				}
Developer: kubernetes, Project: kubernetes, Lines of code: 31, Source: kubelet.go

Example 9: proxyContext


//......... part of the code omitted here .........
			Replicas:     1,
			PollInterval: time.Second,
			Env: map[string]string{
				"SERVE_PORT_80":   `<a href="/rewriteme">test</a>`,
				"SERVE_PORT_1080": `<a href="/rewriteme">test</a>`,
				"SERVE_PORT_160":  "foo",
				"SERVE_PORT_162":  "bar",

				"SERVE_TLS_PORT_443": `<a href="/tlsrewriteme">test</a>`,
				"SERVE_TLS_PORT_460": `tls baz`,
				"SERVE_TLS_PORT_462": `tls qux`,
			},
			Ports: map[string]int{
				"dest1": 160,
				"dest2": 162,

				"tlsdest1": 460,
				"tlsdest2": 462,
			},
			ReadinessProbe: &api.Probe{
				Handler: api.Handler{
					HTTPGet: &api.HTTPGetAction{
						Port: intstr.FromInt(80),
					},
				},
				InitialDelaySeconds: 1,
				TimeoutSeconds:      5,
				PeriodSeconds:       10,
			},
			Labels:      labels,
			CreatedPods: &pods,
		}
		Expect(framework.RunRC(cfg)).NotTo(HaveOccurred())
		defer framework.DeleteRCAndPods(f.Client, f.Namespace.Name, cfg.Name)

		Expect(f.WaitForAnEndpoint(service.Name)).NotTo(HaveOccurred())

		// table constructors
		// Try proxying through the service and directly to through the pod.
		svcProxyURL := func(scheme, port string) string {
			return prefix + "/proxy/namespaces/" + f.Namespace.Name + "/services/" + net.JoinSchemeNamePort(scheme, service.Name, port)
		}
		subresourceServiceProxyURL := func(scheme, port string) string {
			return prefix + "/namespaces/" + f.Namespace.Name + "/services/" + net.JoinSchemeNamePort(scheme, service.Name, port) + "/proxy"
		}
		podProxyURL := func(scheme, port string) string {
			return prefix + "/proxy/namespaces/" + f.Namespace.Name + "/pods/" + net.JoinSchemeNamePort(scheme, pods[0].Name, port)
		}
		subresourcePodProxyURL := func(scheme, port string) string {
			return prefix + "/namespaces/" + f.Namespace.Name + "/pods/" + net.JoinSchemeNamePort(scheme, pods[0].Name, port) + "/proxy"
		}

		// construct the table
		expectations := map[string]string{
			svcProxyURL("", "portname1") + "/": "foo",
			svcProxyURL("", "80") + "/":        "foo",
			svcProxyURL("", "portname2") + "/": "bar",
			svcProxyURL("", "81") + "/":        "bar",

			svcProxyURL("http", "portname1") + "/": "foo",
			svcProxyURL("http", "80") + "/":        "foo",
			svcProxyURL("http", "portname2") + "/": "bar",
			svcProxyURL("http", "81") + "/":        "bar",

			svcProxyURL("https", "tlsportname1") + "/": "tls baz",
			svcProxyURL("https", "443") + "/":          "tls baz",
Developer: invenfantasy, Project: kubernetes, Lines of code: 67, Source: proxy.go

Example 10: ServeImageOrFail

// A basic test to check the deployment of an image using
// a replication controller. The image serves its hostname
// which is checked for each replica.
func ServeImageOrFail(f *framework.Framework, test string, image string) {
	name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
	replicas := int32(2)

	// Create a replication controller for a service
	// that serves its hostname.
	// The source for the Docker container kubernetes/serve_hostname is
	// in contrib/for-demos/serve_hostname
	By(fmt.Sprintf("Creating replication controller %s", name))
	controller, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(&v1.ReplicationController{
		ObjectMeta: v1.ObjectMeta{
			Name: name,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: func(i int32) *int32 { return &i }(replicas),
			Selector: map[string]string{
				"name": name,
			},
			Template: &v1.PodTemplateSpec{
				ObjectMeta: v1.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  name,
							Image: image,
							Ports: []v1.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	// Cleanup the replication controller when we are done.
	defer func() {
		// Resize the replication controller to zero to get rid of pods.
		if err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, controller.Name); err != nil {
			framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
		}
	}()

	// List the pods, making sure we observe all the replicas.
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))

	pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)

	By("Ensuring each pod is running")

	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	for _, pod := range pods.Items {
		if pod.DeletionTimestamp != nil {
			continue
		}
		err = f.WaitForPodRunning(pod.Name)
		Expect(err).NotTo(HaveOccurred())
	}

	// Verify that something is listening.
	By("Trying to dial each unique pod")
	retryTimeout := 2 * time.Minute
	retryInterval := 5 * time.Second
	err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
	if err != nil {
		framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
	}
}
Developer: nak3, Project: kubernetes, Lines of code: 72, Source: rc.go

Example 11:

					Image:        framework.GetPauseImageName(f.Client),
					Replicas:     totalPods,
					NodeSelector: nodeLabels,
				})).NotTo(HaveOccurred())
				// Perform a sanity check so that we know all desired pods are
				// running on the nodes according to kubelet. The timeout is set to
				// only 30 seconds here because framework.RunRC already waited for all pods to
				// transition to the running status.
				Expect(waitTillNPodsRunningOnNodes(f.Client, nodeNames, rcName, f.Namespace.Name, totalPods,
					time.Second*30)).NotTo(HaveOccurred())
				if resourceMonitor != nil {
					resourceMonitor.LogLatest()
				}

				By("Deleting the RC")
				framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, rcName)
				// Check that the pods really are gone by querying /runningpods on the
				// node. The /runningpods handler checks the container runtime (or its
				// cache) and  returns a list of running pods. Some possible causes of
				// failures are:
				//   - kubelet deadlock
				//   - a bug in graceful termination (if it is enabled)
				//   - docker slow to delete pods (or resource problems causing slowness)
				start := time.Now()
				Expect(waitTillNPodsRunningOnNodes(f.Client, nodeNames, rcName, f.Namespace.Name, 0,
					itArg.timeout)).NotTo(HaveOccurred())
				framework.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
					time.Since(start))
				if resourceMonitor != nil {
					resourceMonitor.LogCPUSummary()
				}
Developer: ncdc, Project: kubernetes, Lines of code: 31, Source: kubelet.go

Example 12: testNoWrappedVolumeRace

func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volumeMounts []v1.VolumeMount, podCount int32) {
	rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID())
	nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
	Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
	targetNode := nodeList.Items[0]

	By("Creating RC which spawns configmap-volume pods")
	affinity := &v1.Affinity{
		NodeAffinity: &v1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{
					{
						MatchExpressions: []v1.NodeSelectorRequirement{
							{
								Key:      "kubernetes.io/hostname",
								Operator: v1.NodeSelectorOpIn,
								Values:   []string{targetNode.Name},
							},
						},
					},
				},
			},
		},
	}

	rc := &v1.ReplicationController{
		ObjectMeta: v1.ObjectMeta{
			Name: rcName,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &podCount,
			Selector: map[string]string{
				"name": rcName,
			},
			Template: &v1.PodTemplateSpec{
				ObjectMeta: v1.ObjectMeta{
					Labels: map[string]string{"name": rcName},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:    "test-container",
							Image:   "gcr.io/google_containers/busybox:1.24",
							Command: []string{"sleep", "10000"},
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceCPU: resource.MustParse("10m"),
								},
							},
							VolumeMounts: volumeMounts,
						},
					},
					Affinity:  affinity,
					DNSPolicy: v1.DNSDefault,
					Volumes:   volumes,
				},
			},
		},
	}
	_, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(rc)
	Expect(err).NotTo(HaveOccurred(), "error creating replication controller")

	defer func() {
		err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
		framework.ExpectNoError(err)
	}()

	pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount)

	By("Ensuring each pod is running")

	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	for _, pod := range pods.Items {
		if pod.DeletionTimestamp != nil {
			continue
		}
		err = f.WaitForPodRunning(pod.Name)
		Expect(err).NotTo(HaveOccurred(), "Failed waiting for pod %s to enter running state", pod.Name)
	}
}
Developer: johscheuer, Project: kubernetes, Lines of code: 81, Source: empty_dir_wrapper.go

Example 13:

				},
				ReadinessProbe: &api.Probe{
					Handler: api.Handler{
						HTTPGet: &api.HTTPGetAction{
							Port: intstr.FromInt(80),
						},
					},
					InitialDelaySeconds: 1,
					TimeoutSeconds:      5,
					PeriodSeconds:       10,
				},
				Labels:      labels,
				CreatedPods: &pods,
			}
			Expect(framework.RunRC(cfg)).NotTo(HaveOccurred())
			defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, cfg.Name)

			Expect(f.WaitForAnEndpoint(service.Name)).NotTo(HaveOccurred())

			// table constructors
			// Try proxying through the service and directly to through the pod.
			svcProxyURL := func(scheme, port string) string {
				return prefix + "/proxy/namespaces/" + f.Namespace.Name + "/services/" + net.JoinSchemeNamePort(scheme, service.Name, port)
			}
			subresourceServiceProxyURL := func(scheme, port string) string {
				return prefix + "/namespaces/" + f.Namespace.Name + "/services/" + net.JoinSchemeNamePort(scheme, service.Name, port) + "/proxy"
			}
			podProxyURL := func(scheme, port string) string {
				return prefix + "/proxy/namespaces/" + f.Namespace.Name + "/pods/" + net.JoinSchemeNamePort(scheme, pods[0].Name, port)
			}
			subresourcePodProxyURL := func(scheme, port string) string {
Developer: miminar, Project: kubernetes, Lines of code: 31, Source: proxy.go

Example 14:

	BeforeEach(func() {
		framework.SkipUnlessProviderIs("gce", "gke")
		ns = f.Namespace.Name
		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		nodeCount := len(nodes.Items)
		Expect(nodeCount).NotTo(BeZero())

		cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
		totalMillicores = int((&cpu).MilliValue()) * nodeCount
	})

	It("should ensure that critical pod is scheduled in case there is no resources available", func() {
		By("reserving all available cpu")
		err := reserveAllCpu(f, "reserve-all-cpu", totalMillicores)
		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, "reserve-all-cpu")
		framework.ExpectNoError(err)

		By("creating a new instance of Dashboard and waiting for Dashboard to be scheduled")
		label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kubernetes-dashboard"}))
		listOpts := v1.ListOptions{LabelSelector: label.String()}
		deployments, err := f.ClientSet.Extensions().Deployments(api.NamespaceSystem).List(listOpts)
		framework.ExpectNoError(err)
		Expect(len(deployments.Items)).Should(Equal(1))

		deployment := deployments.Items[0]
		replicas := uint(*(deployment.Spec.Replicas))

		err = framework.ScaleDeployment(f.ClientSet, f.InternalClientset, api.NamespaceSystem, deployment.Name, replicas+1, true)
		defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.InternalClientset, api.NamespaceSystem, deployment.Name, replicas, true))
		framework.ExpectNoError(err)
Developer: alex-mohr, Project: kubernetes, Lines of code: 30, Source: rescheduler.go

Example 15:

var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
	var c *client.Client
	var cs clientset.Interface
	var nodeList *api.NodeList
	var systemPodsNo int
	var totalPodCapacity int64
	var RCName string
	var ns string
	f := framework.NewDefaultFramework("sched-pred")
	ignoreLabels := framework.ImagePullerLabels

	AfterEach(func() {
		rc, err := c.ReplicationControllers(ns).Get(RCName)
		if err == nil && rc.Spec.Replicas != 0 {
			By("Cleaning up the replication controller")
			err := framework.DeleteRCAndPods(c, f.ClientSet, ns, RCName)
			framework.ExpectNoError(err)
		}
	})

	BeforeEach(func() {
		c = f.Client
		cs = f.ClientSet
		ns = f.Namespace.Name
		nodeList = &api.NodeList{}

		framework.WaitForAllNodesHealthy(c, time.Minute)
		masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(c)

		err := framework.CheckTestingNSDeletedExcept(c, ns)
		framework.ExpectNoError(err)
Developer: Random-Liu, Project: kubernetes, Lines of code: 31, Source: scheduler_predicates.go


Note: The k8s.io/kubernetes/test/e2e/framework.DeleteRCAndPods examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. For distribution and use, please follow the corresponding project's license; do not reproduce without permission.