This article collects typical usage examples of the Golang Interface.Pods method from github.com/GoogleCloudPlatform/kubernetes/pkg/client. If you are unsure how to use Interface.Pods, or are looking for concrete examples of it in practice, the curated code samples below may help. You can also explore further usage examples of the containing type, github.com/GoogleCloudPlatform/kubernetes/pkg/client.Interface.
The following shows 11 code examples of the Interface.Pods method, sorted by popularity by default.
Example 1: NewFirstContainerReady
func NewFirstContainerReady(kclient kclient.Interface, timeout time.Duration, interval time.Duration) *FirstContainerReady {
	return &FirstContainerReady{
		timeout:  timeout,
		interval: interval,
		podsForDeployment: func(deployment *kapi.ReplicationController) (*kapi.PodList, error) {
			selector := labels.Set(deployment.Spec.Selector).AsSelector()
			return kclient.Pods(deployment.Namespace).List(selector, fields.Everything())
		},
		getPodStore: func(namespace, name string) (cache.Store, chan struct{}) {
			sel, _ := fields.ParseSelector("metadata.name=" + name)
			store := cache.NewStore(cache.MetaNamespaceKeyFunc)
			lw := &deployutil.ListWatcherImpl{
				ListFunc: func() (runtime.Object, error) {
					return kclient.Pods(namespace).List(labels.Everything(), sel)
				},
				WatchFunc: func(resourceVersion string) (watch.Interface, error) {
					return kclient.Pods(namespace).Watch(labels.Everything(), sel, resourceVersion)
				},
			}
			stop := make(chan struct{})
			cache.NewReflector(lw, &kapi.Pod{}, store, 10*time.Second).RunUntil(stop)
			return store, stop
		},
	}
}
Example 2: deletePods
func deletePods(kubeClient client.Interface, ns string) error {
	items, err := kubeClient.Pods(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.Pods(ns).Delete(items.Items[i].Name)
		if err != nil {
			return err
		}
	}
	return nil
}
Example 3: NewRollingDeploymentStrategy
// NewRollingDeploymentStrategy makes a new RollingDeploymentStrategy.
func NewRollingDeploymentStrategy(namespace string, client kclient.Interface, codec runtime.Codec, initialStrategy acceptingDeploymentStrategy) *RollingDeploymentStrategy {
	updaterClient := &rollingUpdaterClient{
		ControllerHasDesiredReplicasFn: func(rc *kapi.ReplicationController) wait.ConditionFunc {
			return kclient.ControllerHasDesiredReplicas(client, rc)
		},
		GetReplicationControllerFn: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		UpdateReplicationControllerFn: func(namespace string, rc *kapi.ReplicationController) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Update(rc)
		},
		// This guards against the RollingUpdater's built-in behavior to create
		// RCs when the supplied old RC is nil. We won't pass nil, but it doesn't
		// hurt to further guard against it since we would have no way to identify
		// or clean up orphaned RCs RollingUpdater might inadvertently create.
		CreateReplicationControllerFn: func(namespace string, rc *kapi.ReplicationController) (*kapi.ReplicationController, error) {
			return nil, fmt.Errorf("unexpected attempt to create Deployment: %#v", rc)
		},
		// We give the RollingUpdater a policy which should prevent it from
		// deleting the source deployment after the transition, but it doesn't
		// hurt to guard by removing its ability to delete.
		DeleteReplicationControllerFn: func(namespace, name string) error {
			return fmt.Errorf("unexpected attempt to delete Deployment %s/%s", namespace, name)
		},
	}
	return &RollingDeploymentStrategy{
		codec:           codec,
		initialStrategy: initialStrategy,
		client:          updaterClient,
		rollingUpdate: func(config *kubectl.RollingUpdaterConfig) error {
			updater := kubectl.NewRollingUpdater(namespace, updaterClient)
			return updater.Update(config)
		},
		hookExecutor: &stratsupport.HookExecutor{
			PodClient: &stratsupport.HookExecutorPodClientImpl{
				CreatePodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
					return client.Pods(namespace).Create(pod)
				},
				PodWatchFunc: func(namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
					return stratsupport.NewPodWatch(client, namespace, name, resourceVersion, stopChannel)
				},
			},
		},
		getUpdateAcceptor: func(timeout time.Duration) kubectl.UpdateAcceptor {
			return stratsupport.NewFirstContainerReady(client, timeout, NewFirstContainerReadyInterval)
		},
	}
}
Example 4: NewPodWatch
// NewPodWatch creates a pod watching function which is backed by a
// FIFO/reflector pair. This avoids managing watches directly.
// The reflector runs until the provided stop channel is closed; it is the
// caller's responsibility to close it (typically with defer) to avoid leaking resources.
func NewPodWatch(client kclient.Interface, namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
	fieldSelector, _ := fields.ParseSelector("metadata.name=" + name)
	podLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return client.Pods(namespace).List(labels.Everything(), fieldSelector)
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return client.Pods(namespace).Watch(labels.Everything(), fieldSelector, resourceVersion)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(podLW, &kapi.Pod{}, queue, 1*time.Minute).RunUntil(stopChannel)
	return func() *kapi.Pod {
		obj := queue.Pop()
		return obj.(*kapi.Pod)
	}
}
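Because the caller owns the stop channel, a minimal usage sketch might look like the following (the client variable, namespace, pod name, and empty resource version are illustrative assumptions, not part of the original example):
	// Poll the watched pod until it reaches a terminal phase, then stop the reflector.
	stop := make(chan struct{})
	defer close(stop)
	nextPod := stratsupport.NewPodWatch(kubeClient, "default", "deploy-hook-pod", "", stop)
	for {
		pod := nextPod() // blocks until the FIFO yields a fresh copy of the pod
		if pod.Status.Phase == kapi.PodSucceeded || pod.Status.Phase == kapi.PodFailed {
			break
		}
	}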
Example 5: Update
// Update performs a rolling update of a collection of pods.
// 'name' points to a replication controller.
// 'client' is used for updating pods.
// 'updatePeriod' is the time between pod updates.
// 'imageName' is the new image to update for the template. This will work
// with the first container in the pod. There is no support yet for
// updating more complex replication controllers. If this is blank then no
// update of the image is performed.
func Update(ctx api.Context, name string, client client.Interface, updatePeriod time.Duration, imageName string) error {
	// TODO ctx is not needed as input to this function, should just be 'namespace'
	controller, err := client.ReplicationControllers(api.Namespace(ctx)).Get(name)
	if err != nil {
		return err
	}
	if len(imageName) != 0 {
		controller.Spec.Template.Spec.Containers[0].Image = imageName
		controller, err = client.ReplicationControllers(controller.Namespace).Update(controller)
		if err != nil {
			return err
		}
	}
	s := labels.Set(controller.Spec.Selector).AsSelector()
	podList, err := client.Pods(api.Namespace(ctx)).List(s)
	if err != nil {
		return err
	}
	expected := len(podList.Items)
	if expected == 0 {
		return nil
	}
	for _, pod := range podList.Items {
		// We delete the pod here, the controller will recreate it. This will result in pulling
		// a new Docker image. This isn't a full "update" but it's what we support for now.
		err = client.Pods(pod.Namespace).Delete(pod.Name)
		if err != nil {
			return err
		}
		time.Sleep(updatePeriod)
	}
	return wait.Poll(time.Second*5, time.Second*300, func() (bool, error) {
		podList, err := client.Pods(api.Namespace(ctx)).List(s)
		if err != nil {
			return false, err
		}
		return len(podList.Items) == expected, nil
	})
}
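A hedged usage sketch for Update, assuming a configured kubeClient and an existing replication controller named "frontend" (the names, image tag, and update period below are illustrative):
	ctx := api.WithNamespace(api.NewContext(), "default")
	// Swap the first container's image and let the controller recreate each pod,
	// waiting ten seconds between deletions.
	if err := Update(ctx, "frontend", kubeClient, 10*time.Second, "example.com/frontend:v2"); err != nil {
		glog.Fatalf("rolling update failed: %v", err)
	}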
Example 6: NewRecreateDeploymentStrategy
// NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
// a real HookExecutor and client.
func NewRecreateDeploymentStrategy(client kclient.Interface, codec runtime.Codec) *RecreateDeploymentStrategy {
	scaler, _ := kubectl.ScalerFor("ReplicationController", kubectl.NewScalerClient(client))
	return &RecreateDeploymentStrategy{
		getReplicationController: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		scaler: scaler,
		codec:  codec,
		hookExecutor: &stratsupport.HookExecutor{
			PodClient: &stratsupport.HookExecutorPodClientImpl{
				CreatePodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
					return client.Pods(namespace).Create(pod)
				},
				PodWatchFunc: func(namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
					return stratsupport.NewPodWatch(client, namespace, name, resourceVersion, stopChannel)
				},
			},
		},
		retryTimeout: 120 * time.Second,
		retryPeriod:  1 * time.Second,
	}
}
Example 7: listPods
func listPods(client kclient.Interface) (*kapi.PodList, error) {
	// Get build pods carrying the new label.
	sel, err := labels.Parse(buildapi.BuildLabel)
	if err != nil {
		return nil, err
	}
	listNew, err := client.Pods(kapi.NamespaceAll).List(sel, fields.Everything())
	if err != nil {
		return nil, err
	}
	// FIXME: get build pods carrying the old label - remove this once the deprecated label is removed.
	selOld, err := labels.Parse(buildapi.DeprecatedBuildLabel)
	if err != nil {
		return nil, err
	}
	listOld, err := client.Pods(kapi.NamespaceAll).List(selOld, fields.Everything())
	if err != nil {
		return nil, err
	}
	listNew.Items = mergeWithoutDuplicates(listNew.Items, listOld.Items)
	return listNew, nil
}
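listPods relies on a mergeWithoutDuplicates helper that is not shown above. A minimal sketch of what such a helper could look like, deduplicating on namespace/name, is given below (the actual implementation in the source may differ):
	// mergeWithoutDuplicates combines both result sets, skipping pods that
	// already appeared in the first list (matched on namespace/name).
	func mergeWithoutDuplicates(newItems, oldItems []kapi.Pod) []kapi.Pod {
		seen := map[string]bool{}
		merged := make([]kapi.Pod, 0, len(newItems)+len(oldItems))
		for _, pod := range newItems {
			seen[pod.Namespace+"/"+pod.Name] = true
			merged = append(merged, pod)
		}
		for _, pod := range oldItems {
			if !seen[pod.Namespace+"/"+pod.Name] {
				merged = append(merged, pod)
			}
		}
		return merged
	}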
Example 8: NewAcceptNewlyObservedReadyPods
// NewAcceptNewlyObservedReadyPods makes a new AcceptNewlyObservedReadyPods
// from a real client.
func NewAcceptNewlyObservedReadyPods(kclient kclient.Interface, timeout time.Duration, interval time.Duration) *AcceptNewlyObservedReadyPods {
	return &AcceptNewlyObservedReadyPods{
		timeout:      timeout,
		interval:     interval,
		acceptedPods: kutil.NewStringSet(),
		getDeploymentPodStore: func(deployment *kapi.ReplicationController) (cache.Store, chan struct{}) {
			selector := labels.Set(deployment.Spec.Selector).AsSelector()
			store := cache.NewStore(cache.MetaNamespaceKeyFunc)
			lw := &deployutil.ListWatcherImpl{
				ListFunc: func() (runtime.Object, error) {
					return kclient.Pods(deployment.Namespace).List(selector, fields.Everything())
				},
				WatchFunc: func(resourceVersion string) (watch.Interface, error) {
					return kclient.Pods(deployment.Namespace).Watch(selector, fields.Everything(), resourceVersion)
				},
			}
			stop := make(chan struct{})
			cache.NewReflector(lw, &kapi.Pod{}, store, 10*time.Second).RunUntil(stop)
			return store, stop
		},
	}
}
Example 9: NewDeploymentConfigDescriber
// NewDeploymentConfigDescriber returns a new DeploymentConfigDescriber
func NewDeploymentConfigDescriber(client client.Interface, kclient kclient.Interface) *DeploymentConfigDescriber {
	return &DeploymentConfigDescriber{
		client: &genericDeploymentDescriberClient{
			getDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
				return client.DeploymentConfigs(namespace).Get(name)
			},
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return kclient.ReplicationControllers(namespace).Get(name)
			},
			listDeploymentsFunc: func(namespace string, selector labels.Selector) (*kapi.ReplicationControllerList, error) {
				return kclient.ReplicationControllers(namespace).List(selector)
			},
			listPodsFunc: func(namespace string, selector labels.Selector) (*kapi.PodList, error) {
				return kclient.Pods(namespace).List(selector, fields.Everything())
			},
			listEventsFunc: func(deploymentConfig *deployapi.DeploymentConfig) (*kapi.EventList, error) {
				return kclient.Events(deploymentConfig.Namespace).Search(deploymentConfig)
			},
		},
	}
}
Example 10: AddDeploymentKeyToReplicationController
func AddDeploymentKeyToReplicationController(oldRc *api.ReplicationController, client client.Interface, deploymentKey, deploymentValue, namespace string, out io.Writer) (*api.ReplicationController, error) {
	var err error
	// First, update the template label. This ensures that any newly created pods will have the new label.
	if oldRc, err = updateWithRetries(client.ReplicationControllers(namespace), oldRc, func(rc *api.ReplicationController) {
		if rc.Spec.Template.Labels == nil {
			rc.Spec.Template.Labels = map[string]string{}
		}
		rc.Spec.Template.Labels[deploymentKey] = deploymentValue
	}); err != nil {
		return nil, err
	}
	// Update all pods managed by the rc to have the new hash label, so they are correctly adopted.
	// TODO: extract the code from the label command and re-use it here.
	podList, err := client.Pods(namespace).List(labels.SelectorFromSet(oldRc.Spec.Selector), fields.Everything())
	if err != nil {
		return nil, err
	}
	for ix := range podList.Items {
		pod := &podList.Items[ix]
		if pod.Labels == nil {
			pod.Labels = map[string]string{
				deploymentKey: deploymentValue,
			}
		} else {
			pod.Labels[deploymentKey] = deploymentValue
		}
		err = nil
		delay := 3
		for i := 0; i < MaxRetries; i++ {
			_, err = client.Pods(namespace).Update(pod)
			if err != nil {
				fmt.Fprintf(out, "Error updating pod (%v), retrying after %d seconds", err, delay)
				time.Sleep(time.Second * time.Duration(delay))
				delay *= delay
			} else {
				break
			}
		}
		if err != nil {
			return nil, err
		}
	}
	if oldRc.Spec.Selector == nil {
		oldRc.Spec.Selector = map[string]string{}
	}
	// Copy the old selector, so that we can scrub out any orphaned pods.
	selectorCopy := map[string]string{}
	for k, v := range oldRc.Spec.Selector {
		selectorCopy[k] = v
	}
	oldRc.Spec.Selector[deploymentKey] = deploymentValue
	// Update the selector of the rc so it manages all the pods we updated above.
	if oldRc, err = updateWithRetries(client.ReplicationControllers(namespace), oldRc, func(rc *api.ReplicationController) {
		rc.Spec.Selector[deploymentKey] = deploymentValue
	}); err != nil {
		return nil, err
	}
	// Clean up any orphaned pods that don't have the new label; this can happen if the rc manager
	// doesn't see the update to its pod template and creates a new pod with the old labels after
	// we've finished re-adopting existing pods to the rc.
	podList, err = client.Pods(namespace).List(labels.SelectorFromSet(selectorCopy), fields.Everything())
	if err != nil {
		return nil, err
	}
	for ix := range podList.Items {
		pod := &podList.Items[ix]
		if value, found := pod.Labels[deploymentKey]; !found || value != deploymentValue {
			if err := client.Pods(namespace).Delete(pod.Name, nil); err != nil {
				return nil, err
			}
		}
	}
	return oldRc, nil
}
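The example above depends on an updateWithRetries helper and a MaxRetries constant that are not shown. A minimal sketch of such a retry wrapper, which reapplies the mutation and refetches the controller whenever a write fails, might look like this (the signature and retry policy are assumptions, not the actual implementation):
	// updateWithRetries applies fn to the controller and retries the update a
	// bounded number of times, refreshing the object between attempts.
	func updateWithRetries(rcClient client.ReplicationControllerInterface, rc *api.ReplicationController, fn func(*api.ReplicationController)) (*api.ReplicationController, error) {
		var lastErr error
		for i := 0; i < MaxRetries; i++ {
			fn(rc)
			updated, err := rcClient.Update(rc)
			if err == nil {
				return updated, nil
			}
			lastErr = err
			// Refetch the latest copy so the next attempt carries the current resourceVersion.
			refreshed, err := rcClient.Get(rc.Name)
			if err != nil {
				return nil, err
			}
			rc = refreshed
		}
		return nil, lastErr
	}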
Example 11: IncrementUsage
// IncrementUsage updates the supplied ResourceQuotaStatus object based on the incoming operation
// Return true if the usage must be recorded prior to admitting the new resource
// Return an error if the operation should not pass admission control
func IncrementUsage(a admission.Attributes, status *api.ResourceQuotaStatus, client client.Interface) (bool, error) {
	dirty := false
	set := map[api.ResourceName]bool{}
	for k := range status.Hard {
		set[k] = true
	}
	obj := a.GetObject()
	// handle max counts for each kind of resource (pods, services, replicationControllers, etc.)
	if a.GetOperation() == admission.Create {
		resourceName := resourceToResourceName[a.GetResource()]
		hard, hardFound := status.Hard[resourceName]
		if hardFound {
			used, usedFound := status.Used[resourceName]
			if !usedFound {
				return false, fmt.Errorf("Quota usage stats are not yet known, unable to admit resource until an accurate count is completed.")
			}
			if used.Value() >= hard.Value() {
				return false, fmt.Errorf("Limited to %s %s", hard.String(), resourceName)
			} else {
				status.Used[resourceName] = *resource.NewQuantity(used.Value()+int64(1), resource.DecimalSI)
				dirty = true
			}
		}
	}
	// handle memory/cpu constraints, and any diff of usage based on memory/cpu on updates
	if a.GetResource() == "pods" && (set[api.ResourceMemory] || set[api.ResourceCPU]) {
		pod := obj.(*api.Pod)
		deltaCPU := resourcequotacontroller.PodCPU(pod)
		deltaMemory := resourcequotacontroller.PodMemory(pod)
		// if this is an update, we need to find the delta cpu/memory usage from previous state
		if a.GetOperation() == admission.Update {
			oldPod, err := client.Pods(a.GetNamespace()).Get(pod.Name)
			if err != nil {
				return false, err
			}
			oldCPU := resourcequotacontroller.PodCPU(oldPod)
			oldMemory := resourcequotacontroller.PodMemory(oldPod)
			deltaCPU = resource.NewMilliQuantity(deltaCPU.MilliValue()-oldCPU.MilliValue(), resource.DecimalSI)
			deltaMemory = resource.NewQuantity(deltaMemory.Value()-oldMemory.Value(), resource.DecimalSI)
		}
		hardMem, hardMemFound := status.Hard[api.ResourceMemory]
		if hardMemFound {
			if set[api.ResourceMemory] && resourcequotacontroller.IsPodMemoryUnbounded(pod) {
				return false, fmt.Errorf("Limited to %s memory, but pod has no specified memory limit", hardMem.String())
			}
			used, usedFound := status.Used[api.ResourceMemory]
			if !usedFound {
				return false, fmt.Errorf("Quota usage stats are not yet known, unable to admit resource until an accurate count is completed.")
			}
			if used.Value()+deltaMemory.Value() > hardMem.Value() {
				return false, fmt.Errorf("Limited to %s memory", hardMem.String())
			} else {
				status.Used[api.ResourceMemory] = *resource.NewQuantity(used.Value()+deltaMemory.Value(), resource.DecimalSI)
				dirty = true
			}
		}
		hardCPU, hardCPUFound := status.Hard[api.ResourceCPU]
		if hardCPUFound {
			if set[api.ResourceCPU] && resourcequotacontroller.IsPodCPUUnbounded(pod) {
				return false, fmt.Errorf("Limited to %s CPU, but pod has no specified cpu limit", hardCPU.String())
			}
			used, usedFound := status.Used[api.ResourceCPU]
			if !usedFound {
				return false, fmt.Errorf("Quota usage stats are not yet known, unable to admit resource until an accurate count is completed.")
			}
			if used.MilliValue()+deltaCPU.MilliValue() > hardCPU.MilliValue() {
				return false, fmt.Errorf("Limited to %s CPU", hardCPU.String())
			} else {
				status.Used[api.ResourceCPU] = *resource.NewMilliQuantity(used.MilliValue()+deltaCPU.MilliValue(), resource.DecimalSI)
				dirty = true
			}
		}
	}
	return dirty, nil
}
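IncrementUsage indexes into a resourceToResourceName map that is not included in the example. Presumably it maps the admission request's resource to the quota resource name it counts against, roughly like the sketch below (the key type and contents are assumptions, not the actual source):
	// Assumed mapping from the incoming resource kind to the quota resource it is charged against.
	var resourceToResourceName = map[string]api.ResourceName{
		"pods":                   api.ResourcePods,
		"services":               api.ResourceServices,
		"replicationControllers": api.ResourceReplicationControllers,
		"resourceQuotas":         api.ResourceQuotas,
	}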