This article collects typical usage examples of the Golang function NewDeleteOptions from k8s.io/kubernetes/pkg/api/v1. If you are wondering what NewDeleteOptions does, how to call it, or where it is used, the curated code examples below may help.
The following presents 15 code examples of the NewDeleteOptions function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
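Before diving in, it helps to know what the helper builds. Its source is not reproduced on this page, but in this API version NewDeleteOptions is essentially the following one-line constructor (a sketch, not a verbatim copy; see k8s.io/kubernetes/pkg/api/v1 for the authoritative definition):

// In package v1 (sketch): wrap a grace period, in seconds, into DeleteOptions.
// A grace period of 0 asks the API server to delete the object immediately.
func NewDeleteOptions(grace int64) *DeleteOptions {
	return &DeleteOptions{GracePeriodSeconds: &grace}
}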
Example 1: NewPodGC
func NewPodGC(kubeClient clientset.Interface, podInformer cache.SharedIndexInformer, terminatedPodThreshold int) *PodGCController {
	if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("gc_controller", kubeClient.Core().RESTClient().GetRateLimiter())
	}
	gcc := &PodGCController{
		kubeClient:             kubeClient,
		terminatedPodThreshold: terminatedPodThreshold,
		deletePod: func(namespace, name string) error {
			glog.Infof("PodGC is force deleting Pod: %v:%v", namespace, name)
			return kubeClient.Core().Pods(namespace).Delete(name, v1.NewDeleteOptions(0))
		},
	}
	gcc.podStore.Indexer = podInformer.GetIndexer()
	gcc.podController = podInformer.GetController()
	gcc.nodeStore.Store, gcc.nodeController = cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				return gcc.kubeClient.Core().Nodes().List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return gcc.kubeClient.Core().Nodes().Watch(options)
			},
		},
		&v1.Node{},
		controller.NoResyncPeriodFunc(),
		cache.ResourceEventHandlerFuncs{},
	)
	return gcc
}
Example 2: validateTargetedProbeOutput
func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) {
	By("submitting the pod to kubernetes")
	podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
	defer func() {
		By("deleting the pod")
		defer GinkgoRecover()
		podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
	}()
	if _, err := podClient.Create(pod); err != nil {
		framework.Failf("Failed to create %s pod: %v", pod.Name, err)
	}
	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
	By("retrieving the pod")
	pod, err := podClient.Get(pod.Name)
	if err != nil {
		framework.Failf("Failed to get pod %s: %v", pod.Name, err)
	}
	// Try to find the expected value for each expected name.
	By("looking for the results for each expected name from probers")
	assertFilesContain(fileNames, "results", pod, f.ClientSet, true, value)
	framework.Logf("DNS probes using %s succeeded\n", pod.Name)
}
Example 3: deleteServiceOrFail
func deleteServiceOrFail(clientset *fedclientset.Clientset, namespace string, serviceName string) {
	if clientset == nil || len(namespace) == 0 || len(serviceName) == 0 {
		Fail(fmt.Sprintf("Internal error: invalid parameters passed to deleteServiceOrFail: clientset: %v, namespace: %v, service: %v", clientset, namespace, serviceName))
	}
	err := clientset.Services(namespace).Delete(serviceName, v1.NewDeleteOptions(0))
	framework.ExpectNoError(err, "Error deleting service %q from namespace %q", serviceName, namespace)
}
Example 4: deleteClusterIngressOrFail
// TODO: quinton: This is largely a cut 'n paste of the above. Yuck! Refactor as soon as we have a common interface implemented by both fedclientset.Clientset and kubeclientset.Clientset
func deleteClusterIngressOrFail(clusterName string, clientset *kubeclientset.Clientset, namespace string, ingressName string) {
	if clientset == nil || len(namespace) == 0 || len(ingressName) == 0 {
		Fail(fmt.Sprintf("Internal error: invalid parameters passed to deleteClusterIngressOrFail: cluster: %q, clientset: %v, namespace: %v, ingress: %v", clusterName, clientset, namespace, ingressName))
	}
	err := clientset.Ingresses(namespace).Delete(ingressName, v1.NewDeleteOptions(0))
	framework.ExpectNoError(err, "Error deleting cluster ingress %q/%q from cluster %q", namespace, ingressName, clusterName)
}
Example 5: runLivenessTest
func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) {
	podClient := f.PodClient()
	ns := f.Namespace.Name
	Expect(pod.Spec.Containers).NotTo(BeEmpty())
	containerName := pod.Spec.Containers[0].Name
	// At the end of the test, clean up by removing the pod.
	defer func() {
		By("deleting the pod")
		podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
	}()
	By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
	podClient.Create(pod)
	// Wait until the pod is not pending. (Here we need to check for something
	// other than 'Pending', rather than waiting for 'Running', since when
	// failures occur we go to 'Terminated', which can cause indefinite blocking.)
	framework.ExpectNoError(framework.WaitForPodNotPending(f.ClientSet, ns, pod.Name, pod.ResourceVersion),
		fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
	framework.Logf("Started pod %s in namespace %s", pod.Name, ns)
	// Check the pod's current state and verify that restartCount is present.
	By("checking the pod's current state and verifying that restartCount is present")
	pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
	initialRestartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
	framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)
	// Wait for the restart state to be as desired.
	deadline := time.Now().Add(timeout)
	lastRestartCount := initialRestartCount
	observedRestarts := int32(0)
	for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
		pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
		restartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
		if restartCount != lastRestartCount {
			framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
				ns, pod.Name, restartCount, time.Since(start))
			if restartCount < lastRestartCount {
				framework.Failf("Restart count should increment monotonically: restart count of pod %s/%s changed from %d to %d",
					ns, pod.Name, lastRestartCount, restartCount)
			}
		}
		observedRestarts = restartCount - initialRestartCount
		if expectNumRestarts > 0 && int(observedRestarts) >= expectNumRestarts {
			// Stop if we have observed more than expectNumRestarts restarts.
			break
		}
		lastRestartCount = restartCount
	}
	// If we expected 0 restarts, fail if we observed any restart.
	// If we expected n restarts (n > 0), fail if we observed < n restarts.
	if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 &&
		int(observedRestarts) < expectNumRestarts) {
		framework.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d",
			ns, pod.Name, expectNumRestarts, observedRestarts)
	}
}
Example 6: syncPod
// syncPod syncs the given status with the API server. The caller must not hold the lock.
func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
	if !m.needsUpdate(uid, status) {
		glog.V(1).Infof("Status for pod %q is up-to-date; skipping", uid)
		return
	}
	// TODO: make me easier to express from client code
	pod, err := m.kubeClient.Core().Pods(status.podNamespace).Get(status.podName, metav1.GetOptions{})
	if errors.IsNotFound(err) {
		glog.V(3).Infof("Pod %q (%s) does not exist on the server", status.podName, uid)
		// If the Pod is deleted the status will be cleared in
		// RemoveOrphanedStatuses, so we just ignore the update here.
		return
	}
	if err == nil {
		translatedUID := m.podManager.TranslatePodUID(pod.UID)
		if len(translatedUID) > 0 && translatedUID != uid {
			glog.V(2).Infof("Pod %q was deleted and then recreated, skipping status update; old UID %q, new UID %q", format.Pod(pod), uid, translatedUID)
			m.deletePodStatus(uid)
			return
		}
		pod.Status = status.status
		if err := podutil.SetInitContainersStatusesAnnotations(pod); err != nil {
			glog.Error(err)
		}
		// TODO: handle conflict as a retry, make that easier too.
		pod, err = m.kubeClient.Core().Pods(pod.Namespace).UpdateStatus(pod)
		if err == nil {
			glog.V(3).Infof("Status for pod %q updated successfully: %+v", format.Pod(pod), status)
			m.apiStatusVersions[pod.UID] = status.version
			if kubepod.IsMirrorPod(pod) {
				// We don't handle graceful deletion of mirror pods.
				return
			}
			if pod.DeletionTimestamp == nil {
				return
			}
			if !notRunning(pod.Status.ContainerStatuses) {
				glog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod))
				return
			}
			deleteOptions := v1.NewDeleteOptions(0)
			// Use the pod UID as the precondition for deletion to prevent deleting a newly created pod with the same name and namespace.
			deleteOptions.Preconditions = v1.NewUIDPreconditions(string(pod.UID))
			if err = m.kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, deleteOptions); err == nil {
				glog.V(3).Infof("Pod %q fully terminated and removed from etcd", format.Pod(pod))
				m.deletePodStatus(uid)
				return
			}
		}
	}
	// We failed to update status, wait for periodic sync to retry.
	glog.Warningf("Failed to update status for pod %q: %v", format.Pod(pod), err)
}
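Example 6 demonstrates the most defensive force-delete pattern on this page: pairing NewDeleteOptions(0) with a UID precondition. In isolation the pattern looks roughly like this (a hypothetical helper, assuming the same clientset and v1 packages used in the examples above):

// forceDeletePodByUID force-deletes a pod only if its UID still matches,
// so a recreated pod with the same name and namespace is never deleted
// by mistake. (Illustrative sketch, not code from the source above.)
func forceDeletePodByUID(c clientset.Interface, pod *v1.Pod) error {
	opts := v1.NewDeleteOptions(0)
	opts.Preconditions = v1.NewUIDPreconditions(string(pod.UID))
	return c.Core().Pods(pod.Namespace).Delete(pod.Name, opts)
}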
Example 7: deleteOneBackendPodOrFail
/*
   deleteOneBackendPodOrFail deletes exactly one backend pod, which must not be nil.
   The test fails if there are any errors.
*/
func deleteOneBackendPodOrFail(c *cluster) {
	pod := c.backendPod
	Expect(pod).ToNot(BeNil())
	err := c.Clientset.Core().Pods(pod.Namespace).Delete(pod.Name, v1.NewDeleteOptions(0))
	if errors.IsNotFound(err) {
		By(fmt.Sprintf("Pod %q in namespace %q in cluster %q does not exist. No need to delete it.", pod.Name, pod.Namespace, c.name))
	} else {
		framework.ExpectNoError(err, "Deleting pod %q in namespace %q from cluster %q", pod.Name, pod.Namespace, c.name)
	}
	By(fmt.Sprintf("Backend pod %q in namespace %q in cluster %q deleted or does not exist", pod.Name, pod.Namespace, c.name))
}
Example 8: runPodAndGetNodeName
func runPodAndGetNodeName(f *framework.Framework, conf pausePodConfig) string {
	// Launch a pod to find a node which can launch a pod. We intentionally do
	// not just take the node list and choose the first of them. Depending on the
	// cluster and the scheduler it might be that a "normal" pod cannot be
	// scheduled onto it.
	pod := runPausePod(f, conf)
	By("Explicitly delete pod here to free the resource it takes.")
	err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0))
	framework.ExpectNoError(err)
	return pod.Spec.NodeName
}
Example 9: DeleteMirrorPod
func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string) error {
	if mc.apiserverClient == nil {
		return nil
	}
	name, namespace, err := kubecontainer.ParsePodFullName(podFullName)
	if err != nil {
		glog.Errorf("Failed to parse a pod full name %q", podFullName)
		return err
	}
	glog.V(2).Infof("Deleting a mirror pod %q", podFullName)
	// TODO(random-liu): Delete the mirror pod with uid precondition in mirror pod manager
	if err := mc.apiserverClient.Core().Pods(namespace).Delete(name, v1.NewDeleteOptions(0)); err != nil && !errors.IsNotFound(err) {
		glog.Errorf("Failed deleting a mirror pod %q: %v", podFullName, err)
	}
	return nil
}
Example 10: deletePodsSync
// deletePodsSync deletes a list of pods and blocks until the pods disappear.
func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
	var wg sync.WaitGroup
	for _, pod := range pods {
		wg.Add(1)
		go func(pod *v1.Pod) {
			defer wg.Done()
			err := f.PodClient().Delete(pod.ObjectMeta.Name, v1.NewDeleteOptions(30))
			Expect(err).NotTo(HaveOccurred())
			Expect(framework.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
				30*time.Second, 10*time.Minute)).NotTo(HaveOccurred())
		}(pod)
	}
	wg.Wait()
}
Example 11: DeleteNetProxyPod
func (config *NetworkingTestConfig) DeleteNetProxyPod() {
	pod := config.EndpointPods[0]
	config.getPodClient().Delete(pod.Name, v1.NewDeleteOptions(0))
	config.EndpointPods = config.EndpointPods[1:]
	// Wait for the pod to be deleted.
	err := WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
	if err != nil {
		Failf("Failed to delete %s pod: %v", pod.Name, err)
	}
	// Wait for the endpoint to be removed.
	err = WaitForServiceEndpointsNum(config.f.ClientSet, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout)
	if err != nil {
		Failf("Failed to remove endpoint from service: %s", nodePortServiceName)
	}
	// Wait for kube-proxy to catch up with the pod being deleted.
	time.Sleep(5 * time.Second)
}
Example 12: discoverService
func discoverService(f *framework.Framework, name string, exists bool, podName string) {
	command := []string{"sh", "-c", fmt.Sprintf("until nslookup '%s'; do sleep 10; done", name)}
	By(fmt.Sprintf("Looking up %q", name))
	pod := &v1.Pod{
		ObjectMeta: v1.ObjectMeta{
			Name: podName,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:    "federated-service-discovery-container",
					Image:   "gcr.io/google_containers/busybox:1.24",
					Command: command,
				},
			},
			RestartPolicy: v1.RestartPolicyOnFailure,
		},
	}
	nsName := f.FederationNamespace.Name
	By(fmt.Sprintf("Creating pod %q in namespace %q", pod.Name, nsName))
	_, err := f.ClientSet.Core().Pods(nsName).Create(pod)
	framework.ExpectNoError(err, "Trying to create pod to run %q", command)
	By(fmt.Sprintf("Successfully created pod %q in namespace %q", pod.Name, nsName))
	defer func() {
		By(fmt.Sprintf("Deleting pod %q from namespace %q", podName, nsName))
		err := f.ClientSet.Core().Pods(nsName).Delete(podName, v1.NewDeleteOptions(0))
		framework.ExpectNoError(err, "Deleting pod %q from namespace %q", podName, nsName)
		By(fmt.Sprintf("Deleted pod %q from namespace %q", podName, nsName))
	}()
	if exists {
		// TODO(mml): Eventually check the IP address is correct, too.
		Eventually(podExitCodeDetector(f, podName, nsName, 0), 3*DNSTTL, time.Second*2).
			Should(BeNil(), "%q should exit 0, but it never did", command)
	} else {
		Eventually(podExitCodeDetector(f, podName, nsName, 0), 3*DNSTTL, time.Second*2).
			ShouldNot(BeNil(), "%q should eventually not exit 0, but it always did", command)
	}
}
Example 13: checkExistingRCRecovers
func checkExistingRCRecovers(f *framework.Framework) {
	By("assert that the pre-existing replication controller recovers")
	podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
	rcSelector := labels.Set{"name": "baz"}.AsSelector()
	By("deleting pods from existing replication controller")
	framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
		options := v1.ListOptions{LabelSelector: rcSelector.String()}
		pods, err := podClient.List(options)
		if err != nil {
			framework.Logf("apiserver returned error, as expected before recovery: %v", err)
			return false, nil
		}
		if len(pods.Items) == 0 {
			return false, nil
		}
		for _, pod := range pods.Items {
			err = podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
			Expect(err).NotTo(HaveOccurred())
		}
		framework.Logf("apiserver has recovered")
		return true, nil
	}))
	By("waiting for replication controller to recover")
	framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
		options := v1.ListOptions{LabelSelector: rcSelector.String()}
		pods, err := podClient.List(options)
		Expect(err).NotTo(HaveOccurred())
		for _, pod := range pods.Items {
			if pod.DeletionTimestamp == nil && v1.IsPodReady(&pod) {
				return true, nil
			}
		}
		return false, nil
	}))
}
Example 14: createGitServer
func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cleanup func()) {
	var err error
	gitServerPodName := "git-server-" + string(uuid.NewUUID())
	containerPort := 8000
	labels := map[string]string{"name": gitServerPodName}
	gitServerPod := &v1.Pod{
		ObjectMeta: v1.ObjectMeta{
			Name:   gitServerPodName,
			Labels: labels,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:            "git-repo",
					Image:           "gcr.io/google_containers/fakegitserver:0.1",
					ImagePullPolicy: "IfNotPresent",
					Ports: []v1.ContainerPort{
						{ContainerPort: int32(containerPort)},
					},
				},
			},
		},
	}
	f.PodClient().CreateSync(gitServerPod)
	// Portal IP and port
	httpPort := 2345
	gitServerSvc := &v1.Service{
		ObjectMeta: v1.ObjectMeta{
			Name: "git-server-svc",
		},
		Spec: v1.ServiceSpec{
			Selector: labels,
			Ports: []v1.ServicePort{
				{
					Name:       "http-portal",
					Port:       int32(httpPort),
					TargetPort: intstr.FromInt(containerPort),
				},
			},
		},
	}
	if gitServerSvc, err = f.ClientSet.Core().Services(f.Namespace.Name).Create(gitServerSvc); err != nil {
		framework.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err)
	}
	return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
		By("Cleaning up the git server pod")
		if err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(gitServerPod.Name, v1.NewDeleteOptions(0)); err != nil {
			framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
		}
		By("Cleaning up the git server svc")
		if err := f.ClientSet.Core().Services(f.Namespace.Name).Delete(gitServerSvc.Name, nil); err != nil {
			framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err)
		}
	}
}
Example 15:
	}
	pods, err = podClient.List(options)
	if err != nil {
		framework.Failf("Failed to query for pods: %v", err)
	}
	Expect(len(pods.Items)).To(Equal(1))
	By("verifying pod creation was observed")
	observeCreation(w)
	// We need to wait for the pod to be scheduled, otherwise the deletion
	// will be carried out immediately rather than gracefully.
	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
	By("deleting the pod gracefully")
	if err := podClient.Delete(pod.Name, v1.NewDeleteOptions(30)); err != nil {
		framework.Failf("Failed to delete pod: %v", err)
	}
	By("verifying pod deletion was observed")
	obj := observeObjectDeletion(w)
	lastPod := obj.(*v1.Pod)
	Expect(lastPod.DeletionTimestamp).ToNot(BeNil())
	Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())
	options = v1.ListOptions{LabelSelector: selector}
	pods, err = podClient.List(options)
	if err != nil {
		framework.Failf("Failed to list pods to verify deletion: %v", err)
	}
	Expect(len(pods.Items)).To(Equal(0))
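As a closing note, the examples above use two grace periods: 0 (force delete, as in Examples 1 through 9) and 30 seconds (graceful delete, as in Examples 10 and 15). The call shape is identical; only the deletion behavior changes. A hypothetical side-by-side sketch, assuming the same clientset interface as above:

// forceDeletePod removes the pod immediately, without waiting for its
// containers to shut down. (Hypothetical helper, not from the examples.)
func forceDeletePod(c clientset.Interface, ns, name string) error {
	return c.Core().Pods(ns).Delete(name, v1.NewDeleteOptions(0))
}

// gracefulDeletePod gives the kubelet up to 30 seconds to stop the pod's
// containers before the object is removed. (Hypothetical helper.)
func gracefulDeletePod(c clientset.Interface, ns, name string) error {
	return c.Core().Pods(ns).Delete(name, v1.NewDeleteOptions(30))
}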