This article collects typical usage examples of the Go type PodStatus from the k8s.io/kubernetes/pkg/api package. If you are unsure what PodStatus does or how to use it, the curated examples below may help.
Ten PodStatus code examples follow, ordered by popularity.
Example 1: SetPodStatus
func (m *manager) SetPodStatus(pod *api.Pod, status api.PodStatus) {
	m.podStatusesLock.Lock()
	defer m.podStatusesLock.Unlock()

	var oldStatus api.PodStatus
	if cachedStatus, ok := m.podStatuses[pod.UID]; ok {
		oldStatus = cachedStatus.status
	} else if mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod); ok {
		oldStatus = mirrorPod.Status
	} else {
		oldStatus = pod.Status
	}

	// Set ReadyCondition.LastTransitionTime.
	if readyCondition := api.GetPodReadyCondition(status); readyCondition != nil {
		// Need to set LastTransitionTime.
		lastTransitionTime := unversioned.Now()
		oldReadyCondition := api.GetPodReadyCondition(oldStatus)
		if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
			lastTransitionTime = oldReadyCondition.LastTransitionTime
		}
		readyCondition.LastTransitionTime = lastTransitionTime
	}

	// ensure that the start time does not change across updates.
	if oldStatus.StartTime != nil && !oldStatus.StartTime.IsZero() {
		status.StartTime = oldStatus.StartTime
	} else if status.StartTime.IsZero() {
		// if the status has no start time, we need to set an initial time
		now := unversioned.Now()
		status.StartTime = &now
	}

	m.updateStatusInternal(pod, status)
}
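Examples 1 and 2 come from the same status-manager snapshot but omit the manager's definition. For context, here is a minimal sketch of the state they assume, reconstructed purely from the fields and literals the code touches; the exact types in the kubelet may differ.

// Sketch reconstructed from the fields used in Examples 1 and 2.
type versionedPodStatus struct {
	status       api.PodStatus
	version      uint64 // incremented on every cached change
	podName      string
	podNamespace string
}

type podStatusSyncRequest struct {
	podUID types.UID
	status versionedPodStatus
}

type manager struct {
	podStatusesLock  sync.RWMutex
	podStatuses      map[types.UID]versionedPodStatus // guarded by podStatusesLock
	podStatusChannel chan podStatusSyncRequest        // drained by syncBatch
	// podManager resolves mirror pods; its concrete type is omitted here.
	podManager interface {
		GetMirrorPodByPod(*api.Pod) (*api.Pod, bool)
	}
}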
Example 2: updateStatusInternal
// updateStatusInternal updates the internal status cache, and queues an update to the api server if
// necessary. Returns whether an update was triggered.
// This method IS NOT THREAD SAFE and must be called from a locked function.
func (m *manager) updateStatusInternal(pod *api.Pod, status api.PodStatus, forceUpdate bool) bool {
	var oldStatus api.PodStatus
	cachedStatus, isCached := m.podStatuses[pod.UID]
	if isCached {
		oldStatus = cachedStatus.status
	} else if mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod); ok {
		oldStatus = mirrorPod.Status
	} else {
		oldStatus = pod.Status
	}

	// Set ReadyCondition.LastTransitionTime.
	if readyCondition := api.GetPodReadyCondition(status); readyCondition != nil {
		// Need to set LastTransitionTime.
		lastTransitionTime := unversioned.Now()
		oldReadyCondition := api.GetPodReadyCondition(oldStatus)
		if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
			lastTransitionTime = oldReadyCondition.LastTransitionTime
		}
		readyCondition.LastTransitionTime = lastTransitionTime
	}

	// ensure that the start time does not change across updates.
	if oldStatus.StartTime != nil && !oldStatus.StartTime.IsZero() {
		status.StartTime = oldStatus.StartTime
	} else if status.StartTime.IsZero() {
		// if the status has no start time, we need to set an initial time
		now := unversioned.Now()
		status.StartTime = &now
	}

	normalizeStatus(&status)
	// The intent here is to prevent concurrent updates to a pod's status from
	// clobbering each other so the phase of a pod progresses monotonically.
	if isCached && isStatusEqual(&cachedStatus.status, &status) && !forceUpdate {
		glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", format.Pod(pod), status)
		return false // No new status.
	}

	newStatus := versionedPodStatus{
		status:       status,
		version:      cachedStatus.version + 1,
		podName:      pod.Name,
		podNamespace: pod.Namespace,
	}
	m.podStatuses[pod.UID] = newStatus

	select {
	case m.podStatusChannel <- podStatusSyncRequest{pod.UID, newStatus}:
		return true
	default:
		// Let the periodic syncBatch handle the update if the channel is full.
		// We can't block, since we hold the mutex lock.
		glog.V(4).Infof("Skipping the status update for pod %q for now because the channel is full; status: %+v",
			format.Pod(pod), status)
		return false
	}
}
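Example 2 leans on two helpers that are not shown: isStatusEqual and normalizeStatus. Below is a minimal sketch of what they plausibly do; reflect.DeepEqual stands in for the kubelet's semantic deep-equality check, and only condition-timestamp normalization is shown.

// isStatusEqual reports whether two pod statuses are equivalent. A plain
// reflect.DeepEqual is used here as a stand-in for the kubelet's semantic
// deep-equality helper.
func isStatusEqual(oldStatus, status *api.PodStatus) bool {
	return reflect.DeepEqual(oldStatus, status)
}

// normalizeStatus rounds timestamps to RFC 3339 (second) precision so that
// a status that round-trips through the API server still compares equal to
// the cached copy. Sketch only: the real helper also normalizes container
// status timestamps.
func normalizeStatus(status *api.PodStatus) {
	for i := range status.Conditions {
		status.Conditions[i].LastTransitionTime = status.Conditions[i].LastTransitionTime.Rfc3339Copy()
		status.Conditions[i].LastProbeTime = status.Conditions[i].LastProbeTime.Rfc3339Copy()
	}
}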
Example 3: toPodStatus
// toPodStatus converts a podInfo type into an api.PodStatus type.
func (p *podInfo) toPodStatus(pod *kubecontainer.Pod) api.PodStatus {
	var status api.PodStatus
	status.PodIP = p.ip
	// For now just make every container's state the same as the pod.
	for _, container := range pod.Containers {
		status.ContainerStatuses = append(status.ContainerStatuses, p.getContainerStatus(container))
	}
	return status
}
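getContainerStatus is not part of this snippet. The following is a hypothetical sketch consistent with the comment above (every container mirrors the pod-level state); the helper's name comes from the call site, but everything inside it is an assumption.

// Hypothetical implementation of the helper called above; only the
// container name is taken from data visible in the example.
func (p *podInfo) getContainerStatus(container *kubecontainer.Container) api.ContainerStatus {
	return api.ContainerStatus{
		Name: container.Name,
		// A fuller implementation would set State, Image, and
		// RestartCount from the runtime's per-container records.
	}
}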
Example 4: SetPodStatus
func (m *manager) SetPodStatus(pod *api.Pod, status api.PodStatus) {
	m.podStatusesLock.Lock()
	defer m.podStatusesLock.Unlock()

	oldStatus, found := m.podStatuses[pod.UID]
	// ensure that the start time does not change across updates.
	if found && oldStatus.StartTime != nil {
		status.StartTime = oldStatus.StartTime
	}

	// Set ReadyCondition.LastTransitionTime.
	// Note we cannot do this while generating the status since we do not have
	// oldStatus at that time for mirror pods.
	if readyCondition := api.GetPodReadyCondition(status); readyCondition != nil {
		// Need to set LastTransitionTime.
		lastTransitionTime := unversioned.Now()
		if found {
			oldReadyCondition := api.GetPodReadyCondition(oldStatus)
			if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
				lastTransitionTime = oldReadyCondition.LastTransitionTime
			}
		}
		readyCondition.LastTransitionTime = lastTransitionTime
	}

	// if the status has no start time, we need to set an initial time
	// TODO(yujuhong): Consider setting StartTime when generating the pod
	// status instead, which would allow manager to become a simple cache
	// again.
	if status.StartTime.IsZero() {
		if pod.Status.StartTime.IsZero() {
			// the pod did not have a previously recorded value, so set it to now
			now := unversioned.Now()
			status.StartTime = &now
		} else {
			// the pod had a recorded value, but the kubelet restarted, so we
			// need to rebuild the cache based on the last observed value
			status.StartTime = pod.Status.StartTime
		}
	}

	// TODO: Holding a lock during blocking operations is dangerous. Refactor so this isn't necessary.
	// The intent here is to prevent concurrent updates to a pod's status from
	// clobbering each other so the phase of a pod progresses monotonically.
	// Currently this routine is not called for the same pod from multiple
	// workers and/or the kubelet, but dropping the lock before sending the
	// status down the channel feels like an easy way to shoot ourselves in the foot.
	if !found || !isStatusEqual(&oldStatus, &status) || pod.DeletionTimestamp != nil {
		m.podStatuses[pod.UID] = status
		m.podStatusChannel <- podStatusSyncRequest{pod, status}
	} else {
		glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", kubeletUtil.FormatPodName(pod), status)
	}
}
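In this older variant the sync request carries the pod pointer rather than a UID, and the channel send happens while the lock is held, which is exactly the hazard the TODO above describes. The request type implied by the send site, contrasted with the UID-plus-version shape used in Examples 1 and 2:

// Reconstructed from `podStatusSyncRequest{pod, status}` above.
type podStatusSyncRequest struct {
	pod    *api.Pod
	status api.PodStatus
}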
Example 5: makePodStatus
// makePodStatus constructs the pod status from the pod info and rkt info.
func makePodStatus(pod *kubecontainer.Pod, podInfo *podInfo, rktInfo *rktInfo) api.PodStatus {
	var status api.PodStatus
	status.PodIP = podInfo.ip
	// For now just make every container's state the same as the pod.
	for _, container := range pod.Containers {
		containerStatus := makeContainerStatus(container, podInfo)
		containerStatus.RestartCount = rktInfo.restartCount
		status.ContainerStatuses = append(status.ContainerStatuses, containerStatus)
	}
	return status
}
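Only one field of rktInfo is visible here. Reconstructed from that single use (the field type is an assumption, chosen to match api.ContainerStatus.RestartCount):

// Reconstructed from the single field the example touches.
type rktInfo struct {
	restartCount int // copied onto every container status
}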
Example 6: SetPodStatus
func (m *manager) SetPodStatus(pod *api.Pod, status api.PodStatus) {
	m.podStatusesLock.Lock()
	defer m.podStatusesLock.Unlock()

	oldStatus, found := m.podStatuses[pod.UID]
	// ensure that the start time does not change across updates.
	if found && oldStatus.status.StartTime != nil {
		status.StartTime = oldStatus.status.StartTime
	}

	// Set ReadyCondition.LastTransitionTime.
	// Note we cannot do this while generating the status since we do not have
	// oldStatus at that time for mirror pods.
	if readyCondition := api.GetPodReadyCondition(status); readyCondition != nil {
		// Need to set LastTransitionTime.
		lastTransitionTime := unversioned.Now()
		if found {
			oldReadyCondition := api.GetPodReadyCondition(oldStatus.status)
			if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
				lastTransitionTime = oldReadyCondition.LastTransitionTime
			}
		}
		readyCondition.LastTransitionTime = lastTransitionTime
	}

	// if the status has no start time, we need to set an initial time
	// TODO(yujuhong): Consider setting StartTime when generating the pod
	// status instead, which would allow manager to become a simple cache
	// again.
	if status.StartTime.IsZero() {
		if pod.Status.StartTime.IsZero() {
			// the pod did not have a previously recorded value, so set it to now
			now := unversioned.Now()
			status.StartTime = &now
		} else {
			// the pod had a recorded value, but the kubelet restarted, so we
			// need to rebuild the cache based on the last observed value
			status.StartTime = pod.Status.StartTime
		}
	}

	newStatus := m.updateStatusInternal(pod, status)
	if newStatus != nil {
		select {
		case m.podStatusChannel <- podStatusSyncRequest{pod.UID, *newStatus}:
		default:
			// Let the periodic syncBatch handle the update if the channel is full.
			// We can't block, since we hold the mutex lock.
		}
	}
}
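Unlike Example 2, the updateStatusInternal called here takes no forceUpdate flag and returns a pointer: nil when the cached status is unchanged, otherwise the newly versioned entry for the caller to enqueue. A sketch reconstructed from the call site and the cache shape in Example 2, not the verbatim source:

// Implied by the call site above: update the cache and return the new
// versioned status, or nil when the update was a no-op.
func (m *manager) updateStatusInternal(pod *api.Pod, status api.PodStatus) *versionedPodStatus {
	cachedStatus, isCached := m.podStatuses[pod.UID]
	if isCached && isStatusEqual(&cachedStatus.status, &status) {
		return nil // nothing changed; nothing to sync
	}
	newStatus := versionedPodStatus{
		status:       status,
		version:      cachedStatus.version + 1,
		podName:      pod.Name,
		podNamespace: pod.Namespace,
	}
	m.podStatuses[pod.UID] = newStatus
	return &newStatus
}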
Example 7: TestStaleUpdates
func TestStaleUpdates(t *testing.T) {
	pod := getTestPod()
	client := fake.NewSimpleClientset(pod)
	m := newTestManager(client)

	status := api.PodStatus{Message: "initial status"}
	m.SetPodStatus(pod, status)
	status.Message = "first version bump"
	m.SetPodStatus(pod, status)
	status.Message = "second version bump"
	m.SetPodStatus(pod, status)
	verifyUpdates(t, m, 3)

	t.Logf("First sync pushes latest status.")
	m.testSyncBatch()
	verifyActions(t, m.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
	})
	client.ClearActions()

	for i := 0; i < 2; i++ {
		t.Logf("Next 2 syncs should be ignored (%d).", i)
		m.testSyncBatch()
		verifyActions(t, m.kubeClient, []core.Action{})
	}

	t.Log("Unchanged status should not send an update.")
	m.SetPodStatus(pod, status)
	verifyUpdates(t, m, 0)

	t.Log("... unless it's stale.")
	m.apiStatusVersions[pod.UID] = m.apiStatusVersions[pod.UID] - 1
	m.SetPodStatus(pod, status)
	m.testSyncBatch()
	verifyActions(t, m.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
	})

	// Nothing stuck in the pipe.
	verifyUpdates(t, m, 0)
}
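verifyUpdates is one of several test helpers not shown here. A plausible sketch, assuming it drains the status channel and fails the test when the number of queued updates differs from the expectation; the real helper in the kubelet tests may differ in detail:

// Sketch of a test helper: drain queued status updates and check the count.
func verifyUpdates(t *testing.T, manager *manager, expectedUpdates int) {
	numUpdates := 0
	for i := 0; i < expectedUpdates; i++ {
		select {
		case <-manager.podStatusChannel:
			numUpdates++
		default:
		}
	}
	// No extra updates should be left in the channel.
	select {
	case <-manager.podStatusChannel:
		numUpdates++
	default:
	}
	if numUpdates != expectedUpdates {
		t.Errorf("unexpected number of updates: expected %d, got %d", expectedUpdates, numUpdates)
	}
}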
Example 8: SetPodStatus
func (m *manager) SetPodStatus(pod *api.Pod, status api.PodStatus) {
	m.podStatusesLock.Lock()
	defer m.podStatusesLock.Unlock()

	var oldStatus api.PodStatus
	if cachedStatus, ok := m.podStatuses[pod.UID]; ok {
		oldStatus = cachedStatus.status
	} else if mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod); ok {
		oldStatus = mirrorPod.Status
	} else {
		oldStatus = pod.Status
	}

	// Set ReadyCondition.LastTransitionTime.
	if readyCondition := api.GetPodReadyCondition(status); readyCondition != nil {
		// Need to set LastTransitionTime.
		lastTransitionTime := unversioned.Now()
		oldReadyCondition := api.GetPodReadyCondition(oldStatus)
		if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
			lastTransitionTime = oldReadyCondition.LastTransitionTime
		}
		readyCondition.LastTransitionTime = lastTransitionTime
	}

	// ensure that the start time does not change across updates.
	if oldStatus.StartTime != nil && !oldStatus.StartTime.IsZero() {
		status.StartTime = oldStatus.StartTime
	} else if status.StartTime.IsZero() {
		// if the status has no start time, we need to set an initial time
		now := unversioned.Now()
		status.StartTime = &now
	}

	newStatus := m.updateStatusInternal(pod, status)
	if newStatus != nil {
		select {
		case m.podStatusChannel <- podStatusSyncRequest{pod.UID, *newStatus}:
		default:
			// Let the periodic syncBatch handle the update if the channel is full.
			// We can't block, since we hold the mutex lock.
		}
	}
}
Example 9: convertStatusToAPIStatus
// convertStatusToAPIStatus creates an api PodStatus for the given pod from
// the given internal pod status. It is purely transformative and does not
// alter the kubelet state at all.
func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) *api.PodStatus {
	var apiPodStatus api.PodStatus
	apiPodStatus.PodIP = podStatus.IP

	apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses(
		pod, podStatus,
		pod.Status.ContainerStatuses,
		pod.Spec.Containers,
		len(pod.Spec.InitContainers) > 0,
		false,
	)
	apiPodStatus.InitContainerStatuses = kl.convertToAPIContainerStatuses(
		pod, podStatus,
		pod.Status.InitContainerStatuses,
		pod.Spec.InitContainers,
		len(pod.Spec.InitContainers) > 0,
		true,
	)

	return &apiPodStatus
}
Example 10: GetPodStatus
// GetPodStatus retrieves the status of the pod, including the information of
// all containers in the pod. Clients of this interface assume the container
// statuses in a pod always have a deterministic ordering (e.g., sorted by name).
func (r *runtime) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) {
	podInfos, err := r.hyperClient.ListPods()
	if err != nil {
		glog.Errorf("Hyper: ListPods failed, error: %s", err)
		return nil, err
	}

	var status api.PodStatus
	podFullName := r.buildHyperPodFullName(string(pod.UID), string(pod.Name), string(pod.Namespace))
	for _, podInfo := range podInfos {
		if podInfo.PodName != podFullName {
			continue
		}

		if len(podInfo.PodInfo.Status.PodIP) > 0 {
			status.PodIP = podInfo.PodInfo.Status.PodIP[0]
		}
		status.HostIP = podInfo.PodInfo.Status.HostIP
		status.Phase = api.PodPhase(podInfo.PodInfo.Status.Phase)
		status.Message = podInfo.PodInfo.Status.Message
		status.Reason = podInfo.PodInfo.Status.Reason

		for _, containerInfo := range podInfo.PodInfo.Status.Status {
			for _, container := range podInfo.PodInfo.Spec.Containers {
				if container.ContainerID == containerInfo.ContainerID {
					status.ContainerStatuses = append(
						status.ContainerStatuses,
						r.getContainerStatus(containerInfo, container.Image, container.ImageID))
				}
			}
		}
	}

	glog.V(5).Infof("Hyper: get pod %s status %+v", podFullName, status)
	return &status, nil
}
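The doc comment promises a deterministic ordering of container statuses, but the nested loop appends in whatever order ListPods returns. If the Hyper API does not guarantee an order, sorting before returning would make the promise hold. This is a suggested addition, not part of the original runtime:

// Hypothetical helper: sort container statuses by name so the ordering
// promised in the GetPodStatus doc comment holds regardless of the order
// ListPods returns.
func sortContainerStatuses(status *api.PodStatus) {
	sort.Slice(status.ContainerStatuses, func(i, j int) bool {
		return status.ContainerStatuses[i].Name < status.ContainerStatuses[j].Name
	})
}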