This article collects typical usage examples of the Golang method k8s.io/kubernetes/pkg/api.PodStatus.StartTime. If you have been wondering what PodStatus.StartTime does, how it is called, or what real code using it looks like, the hand-picked examples below should help. You can also look at further usage examples of the containing type, k8s.io/kubernetes/pkg/api.PodStatus.
Five code examples of the PodStatus.StartTime method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
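All five examples come from the kubelet's status manager and rely on a few package-level types that the snippets do not show. The following is a rough sketch of those types, inferred from the call sites in the newer examples (Examples 1, 2, 4, and 5); Example 3 is older and caches a plain api.PodStatus instead. Field names and the podManager shape are assumptions, not the exact upstream definitions.

// Rough sketch of the surrounding types, inferred from the examples below.
// Assumed imports: sync, k8s.io/kubernetes/pkg/api, k8s.io/kubernetes/pkg/types.
type versionedPodStatus struct {
    status       api.PodStatus
    version      uint64 // bumped on every cached update for a given pod
    podName      string
    podNamespace string
}

type podStatusSyncRequest struct {
    podUID types.UID
    status versionedPodStatus
}

type manager struct {
    podStatusesLock  sync.RWMutex
    podStatuses      map[types.UID]versionedPodStatus // guarded by podStatusesLock
    podStatusChannel chan podStatusSyncRequest
    // Assumed shape: only GetMirrorPodByPod is needed by the examples below.
    podManager interface {
        GetMirrorPodByPod(*api.Pod) (*api.Pod, bool)
    }
}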
Example 1: SetPodStatus
func (m *manager) SetPodStatus(pod *api.Pod, status api.PodStatus) {
    m.podStatusesLock.Lock()
    defer m.podStatusesLock.Unlock()
    var oldStatus api.PodStatus
    if cachedStatus, ok := m.podStatuses[pod.UID]; ok {
        oldStatus = cachedStatus.status
    } else if mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod); ok {
        oldStatus = mirrorPod.Status
    } else {
        oldStatus = pod.Status
    }
    // Set ReadyCondition.LastTransitionTime.
    if readyCondition := api.GetPodReadyCondition(status); readyCondition != nil {
        // Need to set LastTransitionTime.
        lastTransitionTime := unversioned.Now()
        oldReadyCondition := api.GetPodReadyCondition(oldStatus)
        if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
            lastTransitionTime = oldReadyCondition.LastTransitionTime
        }
        readyCondition.LastTransitionTime = lastTransitionTime
    }
    // ensure that the start time does not change across updates.
    if oldStatus.StartTime != nil && !oldStatus.StartTime.IsZero() {
        status.StartTime = oldStatus.StartTime
    } else if status.StartTime.IsZero() {
        // if the status has no start time, we need to set an initial time
        now := unversioned.Now()
        status.StartTime = &now
    }
    m.updateStatusInternal(pod, status)
}
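For context, a hypothetical caller (say, a pod worker that has just regenerated a pod's status) would hand its result to the manager roughly as sketched below; statusManager and pod are placeholder variables, not part of the snippet above.

// Hypothetical caller of Example 1's SetPodStatus.
generated := api.PodStatus{
    Phase: api.PodRunning,
    Conditions: []api.PodCondition{
        {Type: api.PodReady, Status: api.ConditionTrue},
    },
    // StartTime is intentionally left nil: SetPodStatus either keeps the
    // previously cached value or stamps unversioned.Now(), as shown above.
}
statusManager.SetPodStatus(pod, generated)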
Example 2: updateStatusInternal
// updateStatusInternal updates the internal status cache, and queues an update to the api server if
// necessary. Returns whether an update was triggered.
// This method IS NOT THREAD SAFE and must be called from a locked function.
func (m *manager) updateStatusInternal(pod *api.Pod, status api.PodStatus, forceUpdate bool) bool {
    var oldStatus api.PodStatus
    cachedStatus, isCached := m.podStatuses[pod.UID]
    if isCached {
        oldStatus = cachedStatus.status
    } else if mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod); ok {
        oldStatus = mirrorPod.Status
    } else {
        oldStatus = pod.Status
    }
    // Set ReadyCondition.LastTransitionTime.
    if readyCondition := api.GetPodReadyCondition(status); readyCondition != nil {
        // Need to set LastTransitionTime.
        lastTransitionTime := unversioned.Now()
        oldReadyCondition := api.GetPodReadyCondition(oldStatus)
        if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
            lastTransitionTime = oldReadyCondition.LastTransitionTime
        }
        readyCondition.LastTransitionTime = lastTransitionTime
    }
    // ensure that the start time does not change across updates.
    if oldStatus.StartTime != nil && !oldStatus.StartTime.IsZero() {
        status.StartTime = oldStatus.StartTime
    } else if status.StartTime.IsZero() {
        // if the status has no start time, we need to set an initial time
        now := unversioned.Now()
        status.StartTime = &now
    }
    normalizeStatus(&status)
    // The intent here is to prevent concurrent updates to a pod's status from
    // clobbering each other so the phase of a pod progresses monotonically.
    if isCached && isStatusEqual(&cachedStatus.status, &status) && !forceUpdate {
        glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", format.Pod(pod), status)
        return false // No new status.
    }
    newStatus := versionedPodStatus{
        status:       status,
        version:      cachedStatus.version + 1,
        podName:      pod.Name,
        podNamespace: pod.Namespace,
    }
    m.podStatuses[pod.UID] = newStatus
    select {
    case m.podStatusChannel <- podStatusSyncRequest{pod.UID, newStatus}:
        return true
    default:
        // Let the periodic syncBatch handle the update if the channel is full.
        // We can't block, since we hold the mutex lock.
        glog.V(4).Infof("Skipping the status update for pod %q for now because the channel is full; status: %+v",
            format.Pod(pod), status)
        return false
    }
}
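Example 2 also calls two helpers that are not reproduced here, normalizeStatus and isStatusEqual. A minimal sketch of plausible implementations follows; the real kubelet versions do more (for instance, they also normalize container-state timestamps), so treat this as an illustration of the idea rather than the upstream code.

// Sketch: semantic comparison of two pod statuses. The upstream helper relies
// on api.Semantic.DeepEqual for the same purpose.
func isStatusEqual(oldStatus, status *api.PodStatus) bool {
    return api.Semantic.DeepEqual(oldStatus, status)
}

// Sketch: round timestamps down to RFC3339 (second) precision so that a status
// which round-trips through the API server still compares equal to the cache.
func normalizeStatus(status *api.PodStatus) *api.PodStatus {
    normalizeTimeStamp := func(t *unversioned.Time) {
        *t = t.Rfc3339Copy()
    }
    if status.StartTime != nil {
        normalizeTimeStamp(status.StartTime)
    }
    for i := range status.Conditions {
        normalizeTimeStamp(&status.Conditions[i].LastProbeTime)
        normalizeTimeStamp(&status.Conditions[i].LastTransitionTime)
    }
    return status
}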
Example 3: SetPodStatus
func (m *manager) SetPodStatus(pod *api.Pod, status api.PodStatus) {
    m.podStatusesLock.Lock()
    defer m.podStatusesLock.Unlock()
    oldStatus, found := m.podStatuses[pod.UID]
    // ensure that the start time does not change across updates.
    if found && oldStatus.StartTime != nil {
        status.StartTime = oldStatus.StartTime
    }
    // Set ReadyCondition.LastTransitionTime.
    // Note we cannot do this while generating the status since we do not have oldStatus
    // at that time for mirror pods.
    if readyCondition := api.GetPodReadyCondition(status); readyCondition != nil {
        // Need to set LastTransitionTime.
        lastTransitionTime := unversioned.Now()
        if found {
            oldReadyCondition := api.GetPodReadyCondition(oldStatus)
            if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
                lastTransitionTime = oldReadyCondition.LastTransitionTime
            }
        }
        readyCondition.LastTransitionTime = lastTransitionTime
    }
    // if the status has no start time, we need to set an initial time
    // TODO(yujuhong): Consider setting StartTime when generating the pod
    // status instead, which would allow manager to become a simple cache
    // again.
    if status.StartTime.IsZero() {
        if pod.Status.StartTime.IsZero() {
            // the pod did not have a previously recorded value so set to now
            now := unversioned.Now()
            status.StartTime = &now
        } else {
            // the pod had a recorded value, but the kubelet restarted so we need to rebuild cache
            // based on last observed value
            status.StartTime = pod.Status.StartTime
        }
    }
    // TODO: Holding a lock during blocking operations is dangerous. Refactor so this isn't necessary.
    // The intent here is to prevent concurrent updates to a pod's status from
    // clobbering each other so the phase of a pod progresses monotonically.
    // Currently this routine is not called for the same pod from multiple
    // workers and/or the kubelet but dropping the lock before sending the
    // status down the channel feels like an easy way to get a bullet in foot.
    if !found || !isStatusEqual(&oldStatus, &status) || pod.DeletionTimestamp != nil {
        m.podStatuses[pod.UID] = status
        m.podStatusChannel <- podStatusSyncRequest{pod, status}
    } else {
        glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", kubeletUtil.FormatPodName(pod), status)
    }
}
Example 4: SetPodStatus
func (m *manager) SetPodStatus(pod *api.Pod, status api.PodStatus) {
    m.podStatusesLock.Lock()
    defer m.podStatusesLock.Unlock()
    oldStatus, found := m.podStatuses[pod.UID]
    // ensure that the start time does not change across updates.
    if found && oldStatus.status.StartTime != nil {
        status.StartTime = oldStatus.status.StartTime
    }
    // Set ReadyCondition.LastTransitionTime.
    // Note we cannot do this while generating the status since we do not have oldStatus
    // at that time for mirror pods.
    if readyCondition := api.GetPodReadyCondition(status); readyCondition != nil {
        // Need to set LastTransitionTime.
        lastTransitionTime := unversioned.Now()
        if found {
            oldReadyCondition := api.GetPodReadyCondition(oldStatus.status)
            if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
                lastTransitionTime = oldReadyCondition.LastTransitionTime
            }
        }
        readyCondition.LastTransitionTime = lastTransitionTime
    }
    // if the status has no start time, we need to set an initial time
    // TODO(yujuhong): Consider setting StartTime when generating the pod
    // status instead, which would allow manager to become a simple cache
    // again.
    if status.StartTime.IsZero() {
        if pod.Status.StartTime.IsZero() {
            // the pod did not have a previously recorded value so set to now
            now := unversioned.Now()
            status.StartTime = &now
        } else {
            // the pod had a recorded value, but the kubelet restarted so we need to rebuild cache
            // based on last observed value
            status.StartTime = pod.Status.StartTime
        }
    }
    newStatus := m.updateStatusInternal(pod, status)
    if newStatus != nil {
        select {
        case m.podStatusChannel <- podStatusSyncRequest{pod.UID, *newStatus}:
        default:
            // Let the periodic syncBatch handle the update if the channel is full.
            // We can't block, since we hold the mutex lock.
        }
    }
}
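Note that the updateStatusInternal called here has a different shape from the one shown in Example 2: it returns a *versionedPodStatus (or nil when nothing changed) and leaves the channel send to the caller. The following is only an inferred sketch of that variant, reconstructed from the call sites in Examples 4 and 5, not the upstream implementation.

// Inferred sketch of the updateStatusInternal variant used in Examples 4 and 5:
// cache the status under a new version and return it, or return nil when the
// cached status is already up to date.
func (m *manager) updateStatusInternal(pod *api.Pod, status api.PodStatus) *versionedPodStatus {
    oldStatus, found := m.podStatuses[pod.UID]
    if found && isStatusEqual(&oldStatus.status, &status) && pod.DeletionTimestamp == nil {
        return nil // nothing new to sync
    }
    newStatus := versionedPodStatus{
        status:       status,
        version:      oldStatus.version + 1,
        podName:      pod.Name,
        podNamespace: pod.Namespace,
    }
    m.podStatuses[pod.UID] = newStatus
    return &newStatus
}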
Example 5: SetPodStatus
func (m *manager) SetPodStatus(pod *api.Pod, status api.PodStatus) {
    m.podStatusesLock.Lock()
    defer m.podStatusesLock.Unlock()
    var oldStatus api.PodStatus
    if cachedStatus, ok := m.podStatuses[pod.UID]; ok {
        oldStatus = cachedStatus.status
    } else if mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod); ok {
        oldStatus = mirrorPod.Status
    } else {
        oldStatus = pod.Status
    }
    // Set ReadyCondition.LastTransitionTime.
    if readyCondition := api.GetPodReadyCondition(status); readyCondition != nil {
        // Need to set LastTransitionTime.
        lastTransitionTime := unversioned.Now()
        oldReadyCondition := api.GetPodReadyCondition(oldStatus)
        if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
            lastTransitionTime = oldReadyCondition.LastTransitionTime
        }
        readyCondition.LastTransitionTime = lastTransitionTime
    }
    // ensure that the start time does not change across updates.
    if oldStatus.StartTime != nil && !oldStatus.StartTime.IsZero() {
        status.StartTime = oldStatus.StartTime
    } else if status.StartTime.IsZero() {
        // if the status has no start time, we need to set an initial time
        now := unversioned.Now()
        status.StartTime = &now
    }
    newStatus := m.updateStatusInternal(pod, status)
    if newStatus != nil {
        select {
        case m.podStatusChannel <- podStatusSyncRequest{pod.UID, *newStatus}:
        default:
            // Let the periodic syncBatch handle the update if the channel is full.
            // We can't block, since we hold the mutex lock.
        }
    }
}
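On the other end of podStatusChannel, the manager runs a loop that drains these sync requests and writes the statuses to the API server. A much-simplified sketch of that consumer is shown below; syncPod is a hypothetical helper standing in for the real per-pod sync logic, and the real manager also runs a periodic full syncBatch to catch requests dropped by the non-blocking sends above.

// Simplified sketch of the consumer side of podStatusChannel.
go func() {
    for syncRequest := range m.podStatusChannel {
        glog.V(3).Infof("Syncing status version %d for pod %q",
            syncRequest.status.version, syncRequest.podUID)
        m.syncPod(syncRequest.podUID, syncRequest.status) // hypothetical helper
    }
}()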