本文整理汇总了Golang中k8s.io/kubernetes/pkg/kubelet/types.SortedContainerStatuses函数的典型用法代码示例。如果您正苦于以下问题:Golang SortedContainerStatuses函数的具体用法?Golang SortedContainerStatuses怎么用?Golang SortedContainerStatuses使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了SortedContainerStatuses函数的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: isStatusEqual
// isStatusEqual reports whether two pod statuses are equal, ignoring the
// ordering of their container statuses.
//
// NOTE: both arguments are mutated — their ContainerStatuses slices are
// sorted in place before the comparison.
func isStatusEqual(oldStatus, status *api.PodStatus) bool {
	sort.Sort(kubetypes.SortedContainerStatuses(oldStatus.ContainerStatuses))
	sort.Sort(kubetypes.SortedContainerStatuses(status.ContainerStatuses))
	// TODO: More sophisticated equality checking.
	return reflect.DeepEqual(status, oldStatus)
}
示例2: normalizeStatus
// normalizeStatus truncates every timestamp in the given pod status to
// RFC3339 (one-second) precision and sorts the container statuses, then
// returns the same (mutated) status pointer.
//
// The apiserver only supports RFC3339 timestamps, so statuses read back from
// it carry no nanosecond information, while unversioned.Now() does. Without
// this normalization, comparing a cached status against one returned by the
// apiserver (e.g. via isStatusEqual) would always report a difference.
// See issue #15262 and PR #15263. The real fix belongs on the api side; this
// is a temporary local workaround in the kubelet.
// TODO(random-liu): Remove timestamp related logic after apiserver supports
// nanosecond or makes it consistent.
func normalizeStatus(status *api.PodStatus) *api.PodStatus {
	truncate := func(t *unversioned.Time) {
		*t = t.Rfc3339Copy()
	}
	normalizeState := func(state *api.ContainerState) {
		if running := state.Running; running != nil {
			truncate(&running.StartedAt)
		}
		if term := state.Terminated; term != nil {
			truncate(&term.StartedAt)
			truncate(&term.FinishedAt)
		}
	}

	if status.StartTime != nil {
		truncate(status.StartTime)
	}
	for i := range status.Conditions {
		truncate(&status.Conditions[i].LastProbeTime)
		truncate(&status.Conditions[i].LastTransitionTime)
	}
	for i := range status.ContainerStatuses {
		normalizeState(&status.ContainerStatuses[i].State)
		normalizeState(&status.ContainerStatuses[i].LastTerminationState)
	}
	// Sort so that container ordering cannot affect later equality checks.
	sort.Sort(kubetypes.SortedContainerStatuses(status.ContainerStatuses))
	return status
}
示例3: ConvertPodStatusToAPIPodStatus
// ConvertPodStatusToAPIPodStatus converts an internal kubecontainer.PodStatus
// into an api.PodStatus for the given pod.
//
// For each runtime container status it builds the matching api.ContainerState.
// When the same container name appears more than once (i.e. the container was
// restarted), the first status seen is kept as the current state and the next
// one is recorded as the last termination state. Containers declared in the
// pod spec but absent from the runtime status are reported as Waiting.
// TODO(yifan): Delete this function when the logic is moved to kubelet.
func (r *runtime) ConvertPodStatusToAPIPodStatus(pod *api.Pod, status *kubecontainer.PodStatus) (*api.PodStatus, error) {
	apiPodStatus := &api.PodStatus{
		PodIP: status.IP,
		// Pre-size for one entry per spec container (the loop below emits
		// exactly that many).
		ContainerStatuses: make([]api.ContainerStatus, 0, len(pod.Spec.Containers)),
	}
	containerStatuses := make(map[string]*api.ContainerStatus, len(status.ContainerStatuses))
	for _, c := range status.ContainerStatuses {
		var st api.ContainerState
		switch c.State {
		case kubecontainer.ContainerStateRunning:
			st.Running = &api.ContainerStateRunning{
				StartedAt: unversioned.NewTime(c.StartedAt),
			}
		case kubecontainer.ContainerStateExited:
			st.Terminated = &api.ContainerStateTerminated{
				ExitCode:    c.ExitCode,
				StartedAt:   unversioned.NewTime(c.StartedAt),
				Reason:      c.Reason,
				Message:     c.Message,
				FinishedAt:  unversioned.NewTime(c.FinishedAt),
				ContainerID: c.ID.String(),
			}
		default:
			// Unknown state.
			st.Waiting = &api.ContainerStateWaiting{}
		}
		// Named `existing` (not `status`) so the `status` parameter above is
		// not shadowed inside this loop.
		existing, ok := containerStatuses[c.Name]
		if !ok {
			containerStatuses[c.Name] = &api.ContainerStatus{
				Name:         c.Name,
				Image:        c.Image,
				ImageID:      c.ImageID,
				ContainerID:  c.ID.String(),
				RestartCount: c.RestartCount,
				State:        st,
			}
			continue
		}
		// Found multiple container statuses, fill that as last termination state.
		if existing.LastTerminationState.Waiting == nil &&
			existing.LastTerminationState.Running == nil &&
			existing.LastTerminationState.Terminated == nil {
			existing.LastTerminationState = st
		}
	}
	for _, c := range pod.Spec.Containers {
		cs, ok := containerStatuses[c.Name]
		if !ok {
			// No runtime status for this spec container yet: report Waiting.
			cs = &api.ContainerStatus{
				Name:  c.Name,
				Image: c.Image,
				// TODO(yifan): Add reason and message.
				State: api.ContainerState{Waiting: &api.ContainerStateWaiting{}},
			}
		}
		apiPodStatus.ContainerStatuses = append(apiPodStatus.ContainerStatuses, *cs)
	}
	// Sort so clients see container statuses in a deterministic order.
	sort.Sort(kubetypes.SortedContainerStatuses(apiPodStatus.ContainerStatuses))
	return apiPodStatus, nil
}
示例4: convertToAPIContainerStatuses
//......... part of the code is omitted here .........
// (The function signature and the first part of convertToAPIContainerStatuses
// are not shown; the visible body below builds the []api.ContainerStatus for
// a pod from the runtime status, the previous api statuses, and the spec.)
// Fetch old containers statuses from old pod status.
oldStatuses := make(map[string]api.ContainerStatus, len(containers))
for _, status := range previousStatus {
oldStatuses[status.Name] = status
}
// Set all container statuses to default waiting state
statuses := make(map[string]*api.ContainerStatus, len(containers))
defaultWaitingState := api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ContainerCreating"}}
if hasInitContainers {
// Regular containers wait on init containers before they are created.
defaultWaitingState = api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "PodInitializing"}}
}
for _, container := range containers {
status := &api.ContainerStatus{
Name: container.Name,
Image: container.Image,
State: defaultWaitingState,
}
// Apply some values from the old statuses as the default values.
if oldStatus, found := oldStatuses[container.Name]; found {
status.RestartCount = oldStatus.RestartCount
status.LastTerminationState = oldStatus.LastTerminationState
}
statuses[container.Name] = status
}
// Make the latest container status comes first.
sort.Sort(sort.Reverse(kubecontainer.SortContainerStatusesByCreationTime(podStatus.ContainerStatuses)))
// Set container statuses according to the statuses seen in pod status.
// Only the newest two statuses per container name are used: the newest
// becomes the current state, the second-newest the last termination state.
containerSeen := map[string]int{}
for _, cStatus := range podStatus.ContainerStatuses {
cName := cStatus.Name
if _, ok := statuses[cName]; !ok {
// This would also ignore the infra container.
continue
}
if containerSeen[cName] >= 2 {
continue
}
status := convertContainerStatus(cStatus)
if containerSeen[cName] == 0 {
statuses[cName] = status
} else {
statuses[cName].LastTerminationState = status.State
}
containerSeen[cName] = containerSeen[cName] + 1
}
// Handle the containers failed to be started, which should be in Waiting state.
for _, container := range containers {
if isInitContainer {
// If the init container is terminated with exit code 0, it won't be restarted.
// TODO(random-liu): Handle this in a cleaner way.
s := podStatus.FindContainerStatusByName(container.Name)
if s != nil && s.State == kubecontainer.ContainerStateExited && s.ExitCode == 0 {
continue
}
}
// If a container should be restarted in next syncpod, it is *Waiting*.
if !kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {
continue
}
status := statuses[container.Name]
// Look up the recorded start-failure reason for this container, if any.
reason, message, ok := kl.reasonCache.Get(pod.UID, container.Name)
if !ok {
// In fact, we could also apply Waiting state here, but it is less informative,
// and the container will be restarted soon, so we prefer the original state here.
// Note that with the current implementation of ShouldContainerBeRestarted the original state here
// could be:
// * Waiting: There is no associated historical container and start failure reason record.
// * Terminated: The container is terminated.
continue
}
// Preserve a terminal state as the last termination state before
// overwriting the current state with Waiting.
if status.State.Terminated != nil {
status.LastTerminationState = status.State
}
status.State = api.ContainerState{
Waiting: &api.ContainerStateWaiting{
Reason: reason.Error(),
Message: message,
},
}
statuses[container.Name] = status
}
var containerStatuses []api.ContainerStatus
for _, status := range statuses {
containerStatuses = append(containerStatuses, *status)
}
// Sort the container statuses since clients of this interface expect the list
// of containers in a pod has a deterministic order.
if isInitContainer {
kubetypes.SortInitContainerStatuses(pod, containerStatuses)
} else {
sort.Sort(kubetypes.SortedContainerStatuses(containerStatuses))
}
return containerStatuses
}