Golang NodeCondition.LastTransitionTime Method Code Examples

This article compiles typical usage examples of the NodeCondition.LastTransitionTime method from the Golang package k8s.io/kubernetes/pkg/api. If you have been wondering what exactly NodeCondition.LastTransitionTime does and how to use it, the curated examples here should help. You can also explore further usage examples of the containing type, k8s.io/kubernetes/pkg/api.NodeCondition.


The following presents 6 code examples of the NodeCondition.LastTransitionTime method, sorted by popularity by default.
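Before the examples, it may help to see the shared pattern in isolation: every snippet below refreshes LastHeartbeatTime on each update, but bumps LastTransitionTime only when the condition's Status actually changes. The following is a minimal, self-contained sketch of that pattern; the NodeCondition type and the setCondition helper are hypothetical, simplified stand-ins (the real type lives in k8s.io/kubernetes/pkg/api and uses unversioned.Time rather than time.Time).

package main

import (
	"fmt"
	"time"
)

// NodeCondition is a simplified stand-in for api.NodeCondition.
type NodeCondition struct {
	Type               string
	Status             string
	LastHeartbeatTime  time.Time
	LastTransitionTime time.Time
}

// setCondition upserts cond into conds, preserving LastTransitionTime when
// the status is unchanged and resetting it when the status flips.
func setCondition(conds []NodeCondition, cond NodeCondition, now time.Time) []NodeCondition {
	cond.LastHeartbeatTime = now
	for i := range conds {
		if conds[i].Type == cond.Type {
			if conds[i].Status == cond.Status {
				// No transition: keep the old timestamp.
				cond.LastTransitionTime = conds[i].LastTransitionTime
			} else {
				// Status flipped: record when it happened.
				cond.LastTransitionTime = now
			}
			conds[i] = cond
			return conds
		}
	}
	// First occurrence of this condition type.
	cond.LastTransitionTime = now
	return append(conds, cond)
}

func main() {
	now := time.Now()
	conds := setCondition(nil, NodeCondition{Type: "Ready", Status: "True"}, now)
	later := now.Add(10 * time.Second)
	conds = setCondition(conds, NodeCondition{Type: "Ready", Status: "True"}, later)
	fmt.Println(conds[0].LastTransitionTime.Equal(now))  // true: status unchanged
	fmt.Println(conds[0].LastHeartbeatTime.Equal(later)) // true: heartbeat refreshed
}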

Example 1: setNodeReadyCondition

// Set Ready condition for the node.
func (kl *Kubelet) setNodeReadyCondition(node *api.Node) {
	// NOTE(aaronlevy): NodeReady condition needs to be the last in the list of node conditions.
	// This is due to an issue with version skewed kubelet and master components.
	// ref: https://github.com/kubernetes/kubernetes/issues/16961
	currentTime := unversioned.NewTime(kl.clock.Now())
	var newNodeReadyCondition api.NodeCondition
	if rs := kl.runtimeState.errors(); len(rs) == 0 {
		newNodeReadyCondition = api.NodeCondition{
			Type:              api.NodeReady,
			Status:            api.ConditionTrue,
			Reason:            "KubeletReady",
			Message:           "kubelet is posting ready status",
			LastHeartbeatTime: currentTime,
		}
	} else {
		newNodeReadyCondition = api.NodeCondition{
			Type:              api.NodeReady,
			Status:            api.ConditionFalse,
			Reason:            "KubeletNotReady",
			Message:           strings.Join(rs, ","),
			LastHeartbeatTime: currentTime,
		}
	}

	// Record any soft requirements that were not met in the container manager.
	status := kl.containerManager.Status()
	if status.SoftRequirements != nil {
		newNodeReadyCondition.Message = fmt.Sprintf("%s. WARNING: %s", newNodeReadyCondition.Message, status.SoftRequirements.Error())
	}

	readyConditionUpdated := false
	needToRecordEvent := false
	for i := range node.Status.Conditions {
		if node.Status.Conditions[i].Type == api.NodeReady {
			if node.Status.Conditions[i].Status == newNodeReadyCondition.Status {
				newNodeReadyCondition.LastTransitionTime = node.Status.Conditions[i].LastTransitionTime
			} else {
				newNodeReadyCondition.LastTransitionTime = currentTime
				needToRecordEvent = true
			}
			node.Status.Conditions[i] = newNodeReadyCondition
			readyConditionUpdated = true
			break
		}
	}
	if !readyConditionUpdated {
		newNodeReadyCondition.LastTransitionTime = currentTime
		node.Status.Conditions = append(node.Status.Conditions, newNodeReadyCondition)
	}
	if needToRecordEvent {
		if newNodeReadyCondition.Status == api.ConditionTrue {
			kl.recordNodeStatusEvent(api.EventTypeNormal, events.NodeReady)
		} else {
			kl.recordNodeStatusEvent(api.EventTypeNormal, events.NodeNotReady)
		}
	}
}
Author: danielibrahim | Project: kubernetes | Lines: 58 | Source: kubelet_node_status.go

Example 2: setNodeInodePressureCondition

// setNodeInodePressureCondition sets the NodeInodePressure condition for the node.
// TODO: this needs to move somewhere centralized...
func (kl *Kubelet) setNodeInodePressureCondition(node *api.Node) {
	currentTime := unversioned.NewTime(kl.clock.Now())
	var condition *api.NodeCondition

	// Check if NodeInodePressure condition already exists and if it does, just pick it up for update.
	for i := range node.Status.Conditions {
		if node.Status.Conditions[i].Type == api.NodeInodePressure {
			condition = &node.Status.Conditions[i]
		}
	}

	newCondition := false
	// If the NodeInodePressure condition doesn't exist, create one
	if condition == nil {
		condition = &api.NodeCondition{
			Type:   api.NodeInodePressure,
			Status: api.ConditionUnknown,
		}
		// cannot be appended to node.Status.Conditions here because it gets
		// copied to the slice. So if we append to the slice here none of the
		// updates we make below are reflected in the slice.
		newCondition = true
	}

	// Update the heartbeat time
	condition.LastHeartbeatTime = currentTime

	// Note: The conditions below take care of the case when a new NodeInodePressure condition is
	// created and as well as the case when the condition already exists. When a new condition
	// is created its status is set to api.ConditionUnknown which matches either
	// condition.Status != api.ConditionTrue or
	// condition.Status != api.ConditionFalse in the conditions below depending on whether
	// the kubelet is under inode pressure or not.
	if kl.evictionManager.IsUnderInodePressure() {
		if condition.Status != api.ConditionTrue {
			condition.Status = api.ConditionTrue
			condition.Reason = "KubeletHasInodePressure"
			condition.Message = "kubelet has inode pressure"
			condition.LastTransitionTime = currentTime
			kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasInodePressure")
		}
	} else {
		if condition.Status != api.ConditionFalse {
			condition.Status = api.ConditionFalse
			condition.Reason = "KubeletHasNoInodePressure"
			condition.Message = "kubelet has no inode pressure"
			condition.LastTransitionTime = currentTime
			kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasNoInodePressure")
		}
	}

	if newCondition {
		node.Status.Conditions = append(node.Status.Conditions, *condition)
	}
}
Author: upmc-enterprises | Project: kubernetes | Lines: 58 | Source: kubelet_node_status.go
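The comment in Example 2 about why the new condition cannot be appended to node.Status.Conditions right away comes down to Go's value semantics: append stores a copy of the struct, so later writes through the local pointer never reach the slice. A tiny, self-contained illustration (the condition type here is a hypothetical stand-in):

package main

import "fmt"

type condition struct{ Status string }

func main() {
	var conds []condition
	c := condition{Status: "Unknown"}
	conds = append(conds, c) // append stores a copy of c
	c.Status = "True"        // mutates only the local variable

	fmt.Println(conds[0].Status) // "Unknown": the slice never saw the update
}

This is why the example mutates the condition through a pointer into the slice when it already exists, and defers the append of a brand-new condition until all updates are done.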

Example 3: nodeWithUpdatedStatus

// nodeWithUpdatedStatus clones the given node and updates the NodeReady condition.
// The updated node is returned, along with a boolean indicating whether the
// node was changed at all.
func (u *StatusUpdater) nodeWithUpdatedStatus(n *api.Node) (*api.Node, bool, error) {
	readyCondition := getCondition(&n.Status, api.NodeReady)
	currentTime := unversioned.NewTime(u.nowFunc())

	// Avoid flapping by waiting at least twice the kubelet update frequency, i.e.
	// give the kubelet two chances to update the heartbeat. This is necessary
	// because we only poll the Mesos master state.json once in a while, and we
	// know that the information from there can easily be outdated.
	gracePeriod := u.heartBeatPeriod * 2
	if readyCondition != nil && !currentTime.After(readyCondition.LastHeartbeatTime.Add(gracePeriod)) {
		return n, false, nil
	}

	clone, err := api.Scheme.DeepCopy(n)
	if err != nil {
		return nil, false, err
	}
	n = clone.(*api.Node)

	newNodeReadyCondition := api.NodeCondition{
		Type:              api.NodeReady,
		Status:            api.ConditionTrue,
		Reason:            slaveReadyReason,
		Message:           slaveReadyMessage,
		LastHeartbeatTime: currentTime,
	}

	found := false
	for i := range n.Status.Conditions {
		c := &n.Status.Conditions[i]
		if c.Type == api.NodeReady {
			if c.Status == newNodeReadyCondition.Status {
				newNodeReadyCondition.LastTransitionTime = c.LastTransitionTime
			} else {
				newNodeReadyCondition.LastTransitionTime = currentTime
			}
			n.Status.Conditions[i] = newNodeReadyCondition
			found = true
			break
		}
	}

	if !found {
		newNodeReadyCondition.LastTransitionTime = currentTime
		n.Status.Conditions = append(n.Status.Conditions, newNodeReadyCondition)
	}

	return n, true, nil
}
Author: wenhuizhang | Project: kubernetes | Lines: 52 | Source: statusupdater.go
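The grace-period logic in Example 3 is compact but easy to misread because the check is negated. The helper below is a hypothetical sketch, not part of the original code, that expresses the same test with the polarity inverted: a heartbeat counts as stale only once it is older than twice the heartbeat period.

package main

import (
	"fmt"
	"time"
)

// heartbeatStale reports whether the last heartbeat is older than twice the
// update period, mirroring the grace-period check in nodeWithUpdatedStatus.
func heartbeatStale(lastHeartbeat, now time.Time, period time.Duration) bool {
	gracePeriod := 2 * period
	return now.After(lastHeartbeat.Add(gracePeriod))
}

func main() {
	now := time.Now()
	period := 10 * time.Second
	fmt.Println(heartbeatStale(now.Add(-5*time.Second), now, period))  // false: within the grace period
	fmt.Println(heartbeatStale(now.Add(-25*time.Second), now, period)) // true: status update needed
}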

Example 4: setNodeStatus


//......... part of the code omitted here .........
		glog.Errorf("Error getting machine info: %v", err)
	} else {
		node.Status.NodeInfo.MachineID = info.MachineID
		node.Status.NodeInfo.SystemUUID = info.SystemUUID
		node.Status.Capacity = CapacityFromMachineInfo(info)
		node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity(
			int64(nm.pods), resource.DecimalSI)
		if node.Status.NodeInfo.BootID != "" &&
			node.Status.NodeInfo.BootID != info.BootID {
			// TODO: This requires a transaction, either both node status is updated
			// and event is recorded or neither should happen, see issue #6055.
			nm.recorder.Eventf(nm.nodeRef, "Rebooted",
				"Node %s has been rebooted, boot id: %s", nm.nodeName, info.BootID)
		}
		node.Status.NodeInfo.BootID = info.BootID
	}

	verinfo, err := nm.infoGetter.GetVersionInfo()
	if err != nil {
		glog.Errorf("Error getting version info: %v", err)
	} else {
		node.Status.NodeInfo.KernelVersion = verinfo.KernelVersion
		node.Status.NodeInfo.OsImage = verinfo.ContainerOsVersion
		// TODO: Determine whether the runtime is docker or rocket
		node.Status.NodeInfo.ContainerRuntimeVersion = "docker://" + verinfo.DockerVersion
		node.Status.NodeInfo.KubeletVersion = version.Get().String()
		// TODO: kube-proxy might be different version from kubelet in the future
		node.Status.NodeInfo.KubeProxyVersion = version.Get().String()
	}

	node.Status.DaemonEndpoints = *nm.daemonEndpoints

	// Check whether container runtime can be reported as up.
	containerRuntimeUp := nm.infoGetter.ContainerRuntimeUp()
	// Check whether network is configured properly
	networkConfigured := nm.infoGetter.NetworkConfigured()

	currentTime := unversioned.Now()
	var newNodeReadyCondition api.NodeCondition
	var oldNodeReadyConditionStatus api.ConditionStatus
	if containerRuntimeUp && networkConfigured {
		newNodeReadyCondition = api.NodeCondition{
			Type:              api.NodeReady,
			Status:            api.ConditionTrue,
			Reason:            "KubeletReady",
			Message:           "kubelet is posting ready status",
			LastHeartbeatTime: currentTime,
		}
	} else {
		var messages []string
		if !containerRuntimeUp {
			messages = append(messages, "container runtime is down")
		}
		if !networkConfigured {
			messages = append(messages, "network not configured correctly")
		}
		}
		newNodeReadyCondition = api.NodeCondition{
			Type:              api.NodeReady,
			Status:            api.ConditionFalse,
			Reason:            "KubeletNotReady",
			Message:           strings.Join(messages, ","),
			LastHeartbeatTime: currentTime,
		}
	}

	updated := false
	for i := range node.Status.Conditions {
		if node.Status.Conditions[i].Type == api.NodeReady {
			oldNodeReadyConditionStatus = node.Status.Conditions[i].Status
			if oldNodeReadyConditionStatus == newNodeReadyCondition.Status {
				newNodeReadyCondition.LastTransitionTime = node.Status.Conditions[i].LastTransitionTime
			} else {
				newNodeReadyCondition.LastTransitionTime = currentTime
			}
			node.Status.Conditions[i] = newNodeReadyCondition
			updated = true
		}
	}
	if !updated {
		newNodeReadyCondition.LastTransitionTime = currentTime
		node.Status.Conditions = append(node.Status.Conditions, newNodeReadyCondition)
	}
	if !updated || oldNodeReadyConditionStatus != newNodeReadyCondition.Status {
		if newNodeReadyCondition.Status == api.ConditionTrue {
			nm.recordNodeStatusEvent("NodeReady")
		} else {
			nm.recordNodeStatusEvent("NodeNotReady")
		}
	}
	if oldNodeUnschedulable != node.Spec.Unschedulable {
		if node.Spec.Unschedulable {
			nm.recordNodeStatusEvent("NodeNotSchedulable")
		} else {
			nm.recordNodeStatusEvent("NodeSchedulable")
		}
		oldNodeUnschedulable = node.Spec.Unschedulable
	}
	return nil
}
Author: liuhewei | Project: kubernetes | Lines: 101 | Source: node_manager.go
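A detail worth isolating from Example 4 is the reboot check: a previously recorded BootID that is non-empty and differs from the freshly read one means the machine restarted since the last status sync. The rebootDetected helper below is a hypothetical extraction of that comparison:

package main

import "fmt"

// rebootDetected mirrors the BootID comparison in setNodeStatus.
func rebootDetected(recorded, current string) bool {
	return recorded != "" && recorded != current
}

func main() {
	fmt.Println(rebootDetected("", "abc"))    // false: first sync, nothing recorded yet
	fmt.Println(rebootDetected("abc", "abc")) // false: same boot
	fmt.Println(rebootDetected("abc", "def")) // true: emit the "Rebooted" event
}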

Example 5: setNodeOODCondition

// setNodeOODCondition sets the OutOfDisk (OOD) condition for the node.
func (kl *Kubelet) setNodeOODCondition(node *api.Node) {
	currentTime := unversioned.NewTime(kl.clock.Now())
	var nodeOODCondition *api.NodeCondition

	// Check if NodeOutOfDisk condition already exists and if it does, just pick it up for update.
	for i := range node.Status.Conditions {
		if node.Status.Conditions[i].Type == api.NodeOutOfDisk {
			nodeOODCondition = &node.Status.Conditions[i]
		}
	}

	newOODCondition := false
	// If the NodeOutOfDisk condition doesn't exist, create one.
	if nodeOODCondition == nil {
		nodeOODCondition = &api.NodeCondition{
			Type:   api.NodeOutOfDisk,
			Status: api.ConditionUnknown,
		}
		// nodeOODCondition cannot be appended to node.Status.Conditions here because it gets
		// copied to the slice. So if we append nodeOODCondition to the slice here none of the
		// updates we make to nodeOODCondition below are reflected in the slice.
		newOODCondition = true
	}

	// Update the heartbeat time irrespective of all the conditions.
	nodeOODCondition.LastHeartbeatTime = currentTime

	// Note: The conditions below take care of the case when a new NodeOutOfDisk condition is
	// created and as well as the case when the condition already exists. When a new condition
	// is created its status is set to api.ConditionUnknown which matches either
	// nodeOODCondition.Status != api.ConditionTrue or
	// nodeOODCondition.Status != api.ConditionFalse in the conditions below depending on whether
	// the kubelet is out of disk or not.
	if kl.isOutOfDisk() {
		if nodeOODCondition.Status != api.ConditionTrue {
			nodeOODCondition.Status = api.ConditionTrue
			nodeOODCondition.Reason = "KubeletOutOfDisk"
			nodeOODCondition.Message = "out of disk space"
			nodeOODCondition.LastTransitionTime = currentTime
			kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeOutOfDisk")
		}
	} else {
		if nodeOODCondition.Status != api.ConditionFalse {
			// Update the out of disk condition when the condition status is unknown even if we
			// are within the outOfDiskTransitionFrequency duration. We do this to set the
			// condition status correctly at kubelet startup.
			if nodeOODCondition.Status == api.ConditionUnknown || kl.clock.Since(nodeOODCondition.LastTransitionTime.Time) >= kl.outOfDiskTransitionFrequency {
				nodeOODCondition.Status = api.ConditionFalse
				nodeOODCondition.Reason = "KubeletHasSufficientDisk"
				nodeOODCondition.Message = "kubelet has sufficient disk space available"
				nodeOODCondition.LastTransitionTime = currentTime
				kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasSufficientDisk")
			} else {
				glog.Infof("Node condition status for OutOfDisk is false, but last transition time is less than %s", kl.outOfDiskTransitionFrequency)
			}
		}
	}

	if newOODCondition {
		node.Status.Conditions = append(node.Status.Conditions, *nodeOODCondition)
	}
}
Author: danielibrahim | Project: kubernetes | Lines: 63 | Source: kubelet_node_status.go
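Example 5 differs from the others in one respect: it adds hysteresis. The condition flips back to False immediately when its status is still Unknown (the kubelet just started), but otherwise only after it has held for at least outOfDiskTransitionFrequency, which prevents flapping. The shouldClearOOD helper below is a hypothetical sketch of that gate, with statuses as plain strings for brevity:

package main

import (
	"fmt"
	"time"
)

// shouldClearOOD mirrors the hysteresis in setNodeOODCondition: clear the
// condition immediately at startup (status Unknown), otherwise only after
// it has held for at least freq.
func shouldClearOOD(status string, lastTransition, now time.Time, freq time.Duration) bool {
	return status == "Unknown" || now.Sub(lastTransition) >= freq
}

func main() {
	now := time.Now()
	freq := 5 * time.Minute
	fmt.Println(shouldClearOOD("Unknown", now, now, freq))                   // true: startup case
	fmt.Println(shouldClearOOD("True", now.Add(-time.Minute), now, freq))    // false: too soon, avoid flapping
	fmt.Println(shouldClearOOD("True", now.Add(-10*time.Minute), now, freq)) // true: held long enough
}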

Example 6: setNodeStatus

// setNodeStatus fills in the Status fields of the given Node, overwriting
// any fields that are currently set.
func (nm *realNodeManager) setNodeStatus(node *api.Node) error {

	// Set addresses for the node. These addresses may be stale if there is an
	// error retrieving an updated value, such as the cloudprovider API being
	// unavailable.
	if nm.cloud != nil {
		nm.setUpdatedAddressesFromCloud(node)
	} else {
		nm.setUpdatedAddressesFromHostname(node)
	}

	// TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start
	// cAdvisor locally, e.g. for test-cmd.sh, and in integration test.
	info, err := nm.infoGetter.GetMachineInfo()
	if err != nil {
		// TODO(roberthbailey): This is required for test-cmd.sh to pass.
		// See if the test should be updated instead.
		node.Status.Capacity = api.ResourceList{
			api.ResourceCPU:    *resource.NewMilliQuantity(0, resource.DecimalSI),
			api.ResourceMemory: resource.MustParse("0Gi"),
			api.ResourcePods:   *resource.NewQuantity(int64(nm.pods), resource.DecimalSI),
		}
		glog.Errorf("Error getting machine info: %v", err)
	} else {
		node.Status.NodeInfo.MachineID = info.MachineID
		node.Status.NodeInfo.SystemUUID = info.SystemUUID
		node.Status.Capacity = CapacityFromMachineInfo(info)
		node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity(
			int64(nm.pods), resource.DecimalSI)
		if node.Status.NodeInfo.BootID != "" &&
			node.Status.NodeInfo.BootID != info.BootID {
			// TODO: This requires a transaction, either both node status is updated
			// and event is recorded or neither should happen, see issue #6055.
			nm.recorder.Eventf(nm.nodeRef, "Rebooted",
				"Node %s has been rebooted, boot id: %s", nm.nodeName, info.BootID)
		}
		node.Status.NodeInfo.BootID = info.BootID
	}

	verinfo, err := nm.infoGetter.GetVersionInfo()
	if err != nil {
		glog.Errorf("Error getting version info: %v", err)
	} else {
		node.Status.NodeInfo.KernelVersion = verinfo.KernelVersion
		node.Status.NodeInfo.OsImage = verinfo.ContainerOsVersion
		// TODO: Determine whether the runtime is docker or rocket
		node.Status.NodeInfo.ContainerRuntimeVersion = "docker://" + verinfo.DockerVersion
		node.Status.NodeInfo.KubeletVersion = version.Get().String()
		// TODO: kube-proxy might be different version from kubelet in the future
		node.Status.NodeInfo.KubeProxyVersion = version.Get().String()
	}

	node.Status.DaemonEndpoints = *nm.daemonEndpoints

	// Check whether container runtime can be reported as up.
	containerRuntimeUp := nm.infoGetter.ContainerRuntimeUp()
	// Check whether network is configured properly
	networkConfigured := nm.infoGetter.NetworkConfigured()

	currentTime := unversioned.Now()
	var newNodeReadyCondition api.NodeCondition
	var oldNodeReadyConditionStatus api.ConditionStatus
	if containerRuntimeUp && networkConfigured {
		newNodeReadyCondition = api.NodeCondition{
			Type:              api.NodeReady,
			Status:            api.ConditionTrue,
			Reason:            "KubeletReady",
			Message:           "kubelet is posting ready status",
			LastHeartbeatTime: currentTime,
		}
	} else {
		var messages []string
		if !containerRuntimeUp {
			messages = append(messages, "container runtime is down")
		}
		if !networkConfigured {
			messages = append(messages, "network not configured correctly")
		}
		}
		newNodeReadyCondition = api.NodeCondition{
			Type:              api.NodeReady,
			Status:            api.ConditionFalse,
			Reason:            "KubeletNotReady",
			Message:           strings.Join(messages, ","),
			LastHeartbeatTime: currentTime,
		}
	}

	updated := false
	for i := range node.Status.Conditions {
		if node.Status.Conditions[i].Type == api.NodeReady {
			oldNodeReadyConditionStatus = node.Status.Conditions[i].Status
			if oldNodeReadyConditionStatus == newNodeReadyCondition.Status {
				newNodeReadyCondition.LastTransitionTime = node.Status.Conditions[i].LastTransitionTime
			} else {
				newNodeReadyCondition.LastTransitionTime = currentTime
			}
			node.Status.Conditions[i] = newNodeReadyCondition
//......... part of the code omitted here .........
Author: William-J-Earl | Project: kubernetes | Lines: 101 | Source: node_manager.go


Note: The k8s.io/kubernetes/pkg/api.NodeCondition.LastTransitionTime method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors. Refer to each project's License for terms of distribution and use; do not reproduce without permission.