This article collects typical usage examples of the Golang Namer.ResourceName method from github.com/openshift/origin/pkg/api/graph. If you have been wondering exactly what Namer.ResourceName does and how to use it, the curated code examples below may help. You can also explore further usage examples from the containing package, github.com/openshift/origin/pkg/api/graph.
The following 15 code examples of Namer.ResourceName are shown, sorted by popularity by default. You can vote for the examples you like or find useful; your feedback helps the system recommend better Golang samples.
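Every example below passes a graph node to Namer.ResourceName and gets back a human-readable name such as "dc/frontend". As a minimal sketch of the shape involved (it assumes Namer is the single-method interface these call sites imply; check the package source for the authoritative definition), a stub implementation suitable for tests might look like this:

package main

import (
	"fmt"

	"github.com/gonum/graph"

	osgraph "github.com/openshift/origin/pkg/api/graph"
)

// stubNamer is a hypothetical osgraph.Namer implementation for illustration
// only; the real implementations resolve a node's kind and object metadata
// into names like "dc/frontend" or "pod/frontend-1-abcde".
type stubNamer struct{}

func (stubNamer) ResourceName(node graph.Node) string {
	return fmt.Sprintf("node/%d", node.ID())
}

// compile-time interface check (assumes the single-method interface)
var _ osgraph.Namer = stubNamer{}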
Example 1: FindMissingLivenessProbes
// FindMissingLivenessProbes inspects all PodSpecs for missing liveness probes and generates a list of non-duplicate markers
func FindMissingLivenessProbes(g osgraph.Graph, f osgraph.Namer, setProbeCommand string) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastPodSpecNode := range g.NodesByKind(kubegraph.PodSpecNodeKind) {
		podSpecNode := uncastPodSpecNode.(*kubegraph.PodSpecNode)
		if hasLivenessProbe(podSpecNode) {
			continue
		}

		topLevelNode := osgraph.GetTopLevelContainerNode(g, podSpecNode)

		// skip any podSpec nodes that are managed by other nodes.
		// Liveness probes should only be applied to a controlling
		// podSpec node, and not to any of its children.
		if hasControllerRefEdge(g, topLevelNode) {
			continue
		}

		topLevelString := f.ResourceName(topLevelNode)
		markers = append(markers, osgraph.Marker{
			Node:         podSpecNode,
			RelatedNodes: []graph.Node{topLevelNode},
			Severity:     osgraph.InfoSeverity,
			Key:          MissingLivenessProbeWarning,
			Message: fmt.Sprintf("%s has no liveness probe to verify pods are still running.",
				topLevelString),
			Suggestion: osgraph.Suggestion(fmt.Sprintf("%s %s --liveness ...", setProbeCommand, topLevelString)),
		})
	}

	return markers
}
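All of the Find* examples on this page follow the same pattern as above: walk nodes of a given kind, render names with f.ResourceName, and return a slice of osgraph.Marker. A minimal sketch of consuming that result, assuming a populated graph g and Namer f already exist (the printing loop and the "oc set probe" argument are illustrative, not part of the package):

markers := FindMissingLivenessProbes(g, f, "oc set probe")
for _, m := range markers {
	// Severity, Message, and Suggestion are the fields populated above.
	fmt.Printf("%v: %s\n", m.Severity, m.Message)
	// The osgraph.Suggestion(fmt.Sprintf(...)) conversions above suggest a
	// string-like type, so an empty-string check should work.
	if m.Suggestion != "" {
		fmt.Printf("  suggestion: %s\n", m.Suggestion)
	}
}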
Example 2: FindRestartingPods
// FindRestartingPods inspects all Pods to see if they've restarted more than the threshold
func FindRestartingPods(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastPodNode := range g.NodesByKind(kubegraph.PodNodeKind) {
		podNode := uncastPodNode.(*kubegraph.PodNode)
		pod, ok := podNode.Object().(*kapi.Pod)
		if !ok {
			continue
		}

		for _, containerStatus := range pod.Status.ContainerStatuses {
			if containerStatus.RestartCount >= RestartThreshold {
				markers = append(markers, osgraph.Marker{
					Node:     podNode,
					Severity: osgraph.WarningSeverity,
					Key:      RestartingPodWarning,
					Message: fmt.Sprintf("container %q in %s has restarted %d times", containerStatus.Name,
						f.ResourceName(podNode), containerStatus.RestartCount),
				})
			}
		}
	}

	return markers
}
Example 3: FindDeploymentConfigReadinessWarnings
// FindDeploymentConfigReadinessWarnings inspects deploymentconfigs and reports those that
// don't have readiness probes set up.
func FindDeploymentConfigReadinessWarnings(g osgraph.Graph, f osgraph.Namer, setProbeCommand string) []osgraph.Marker {
	markers := []osgraph.Marker{}

Node:
	for _, uncastDcNode := range g.NodesByKind(deploygraph.DeploymentConfigNodeKind) {
		dcNode := uncastDcNode.(*deploygraph.DeploymentConfigNode)
		if t := dcNode.DeploymentConfig.Spec.Template; t != nil && len(t.Spec.Containers) > 0 {
			for _, container := range t.Spec.Containers {
				if container.ReadinessProbe != nil {
					continue Node
				}
			}
			// All of the containers in the deployment config lack a readiness probe
			markers = append(markers, osgraph.Marker{
				Node:     uncastDcNode,
				Severity: osgraph.WarningSeverity,
				Key:      MissingReadinessProbeWarning,
				Message: fmt.Sprintf("%s has no readiness probe to verify pods are ready to accept traffic or ensure deployment is successful.",
					f.ResourceName(dcNode)),
				Suggestion: osgraph.Suggestion(fmt.Sprintf("%s %s --readiness ...", setProbeCommand, f.ResourceName(dcNode))),
			})
			continue Node
		}
	}

	return markers
}
Example 4: FindMissingSecrets
// FindMissingSecrets inspects all PodSpecs for any Secret reference that is a synthetic node (not a pre-existing node in the graph)
func FindMissingSecrets(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastPodSpecNode := range g.NodesByKind(kubegraph.PodSpecNodeKind) {
		podSpecNode := uncastPodSpecNode.(*kubegraph.PodSpecNode)
		missingSecrets := CheckMissingMountedSecrets(g, podSpecNode)

		topLevelNode := osgraph.GetTopLevelContainerNode(g, podSpecNode)
		topLevelString := f.ResourceName(topLevelNode)

		for _, missingSecret := range missingSecrets {
			markers = append(markers, osgraph.Marker{
				Node:         podSpecNode,
				RelatedNodes: []graph.Node{missingSecret},
				Severity:     osgraph.WarningSeverity,
				Key:          UnmountableSecretWarning,
				Message: fmt.Sprintf("%s is attempting to mount a missing secret %s",
					topLevelString, f.ResourceName(missingSecret)),
			})
		}
	}

	return markers
}
Example 5: FindCircularBuilds
// FindCircularBuilds checks all build configs for cycles
func FindCircularBuilds(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	// Filter out all but ImageStreamTag and BuildConfig nodes
	nodeFn := osgraph.NodesOfKind(imagegraph.ImageStreamTagNodeKind, buildgraph.BuildConfigNodeKind)
	// Filter out all but BuildInputImage and BuildOutput edges
	edgeFn := osgraph.EdgesOfKind(buildedges.BuildInputImageEdgeKind, buildedges.BuildOutputEdgeKind)

	// Create desired subgraph
	sub := g.Subgraph(nodeFn, edgeFn)

	markers := []osgraph.Marker{}

	// Check for cycles
	for _, cycle := range topo.CyclesIn(sub) {
		nodeNames := []string{}
		for _, node := range cycle {
			nodeNames = append(nodeNames, f.ResourceName(node))
		}

		markers = append(markers, osgraph.Marker{
			Node:         cycle[0],
			RelatedNodes: cycle,
			Severity:     osgraph.WarningSeverity,
			Key:          CyclicBuildConfigWarning,
			Message:      fmt.Sprintf("Cycle detected in build configurations: %s", strings.Join(nodeNames, " -> ")),
		})
	}

	return markers
}
Example 6: findPendingTagMarkers
// findPendingTagMarkers is the guts behind FindPendingTags .... break out some of the content and reduce some indentation
func findPendingTagMarkers(istNode *imagegraph.ImageStreamTagNode, g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	buildFound := false
	bcNodes := buildedges.BuildConfigsForTag(g, graph.Node(istNode))
	for _, bcNode := range bcNodes {
		latestBuild := buildedges.GetLatestBuild(g, bcNode)

		// A build config points to the non existent tag but no current build exists.
		if latestBuild == nil {
			continue
		}
		buildFound = true

		// A build config points to the non existent tag but something is going on with
		// the latest build.
		// TODO: Handle other build phases.
		switch latestBuild.Build.Status.Phase {
		case buildapi.BuildPhaseCancelled:
			// TODO: Add a warning here.
		case buildapi.BuildPhaseError:
			// TODO: Add a warning here.
		case buildapi.BuildPhaseComplete:
			// We should never hit this. The output of our build is missing but the build is complete.
			// Most probably the user has messed up?
		case buildapi.BuildPhaseFailed:
			// Since the tag hasn't been populated yet, we assume there hasn't been a successful
			// build so far.
			markers = append(markers, osgraph.Marker{
				Node:         graph.Node(latestBuild),
				RelatedNodes: []graph.Node{graph.Node(istNode), graph.Node(bcNode)},
				Severity:     osgraph.ErrorSeverity,
				Key:          LatestBuildFailedErr,
				Message:      fmt.Sprintf("%s has failed.", f.ResourceName(latestBuild)),
				Suggestion:   osgraph.Suggestion(fmt.Sprintf("Inspect the build failure with 'oc logs -f bc/%s'", bcNode.BuildConfig.GetName())),
			})
		default:
			// Do nothing when latest build is new, pending, or running.
		}
	}

	// if no current builds exist for any of the build configs, append marker for that
	// but ignore ISTs which have no build configs
	if !buildFound && len(bcNodes) > 0 {
		markers = append(markers, osgraph.Marker{
			Node:         graph.Node(istNode),
			RelatedNodes: bcNodesToRelatedNodes(bcNodes),
			Severity:     osgraph.WarningSeverity,
			Key:          TagNotAvailableWarning,
			Message:      fmt.Sprintf("%s needs to be imported or created by a build.", f.ResourceName(istNode)),
			Suggestion:   osgraph.Suggestion(multiBCStartBuildSuggestion(bcNodes)),
		})
	}

	return markers
}
Example 7: getImageStreamTagSuggestion
// getImageStreamTagSuggestion will return the appropriate marker Suggestion for when a BuildConfig is missing its input ImageStreamTag; in particular,
// it will determine whether or not another BuildConfig can produce the aforementioned ImageStreamTag
func getImageStreamTagSuggestion(g osgraph.Graph, f osgraph.Namer, tagNode *imagegraph.ImageStreamTagNode) osgraph.Suggestion {
	bcs := []string{}
	for _, bcNode := range g.PredecessorNodesByEdgeKind(tagNode, buildedges.BuildOutputEdgeKind) {
		bcs = append(bcs, f.ResourceName(bcNode))
	}
	if len(bcs) == 1 {
		return osgraph.Suggestion(fmt.Sprintf("oc start-build %s", bcs[0]))
	}
	if len(bcs) > 0 {
		return osgraph.Suggestion(fmt.Sprintf("`oc start-build` with one of these: %s.", strings.Join(bcs[:], ",")))
	}
	return osgraph.Suggestion(fmt.Sprintf("%s needs to be imported.", f.ResourceName(tagNode)))
}
Example 8: createMissingScaleRefMarker
func createMissingScaleRefMarker(hpaNode *kubenodes.HorizontalPodAutoscalerNode, scaleRef graphapi.Node, namer osgraph.Namer) osgraph.Marker {
	return osgraph.Marker{
		Node:         hpaNode,
		Severity:     osgraph.ErrorSeverity,
		RelatedNodes: []graphapi.Node{scaleRef},
		Key:          HPAMissingScaleRefError,
		Message: fmt.Sprintf("%s is attempting to scale %s/%s, which doesn't exist",
			namer.ResourceName(hpaNode),
			hpaNode.HorizontalPodAutoscaler.Spec.ScaleRef.Kind,
			hpaNode.HorizontalPodAutoscaler.Spec.ScaleRef.Name,
		),
	}
}
Example 9: FindOverlappingHPAs
// FindOverlappingHPAs scans the graph in search of HorizontalPodAutoscalers that are attempting to scale the same set of pods.
// This can occur in two ways:
//   - 1. label selectors for two ReplicationControllers/DeploymentConfigs/etc overlap
//   - 2. multiple HorizontalPodAutoscalers are attempting to scale the same ReplicationController/DeploymentConfig/etc
// Case 1 is handled by deconflicting the area of influence of ReplicationControllers/DeploymentConfigs/etc, and therefore we
// can assume that it will be handled before this step. Therefore, we are only concerned with finding HPAs that are trying to
// scale the same resources.
//
// The algorithm that is used to implement this check is described as follows:
//   - create a sub-graph containing only HPA nodes and other nodes that can be scaled, as well as any scaling edges or other
//     edges used to connect between objects that can be scaled
//   - for every resulting edge in the new sub-graph, create an edge in the reverse direction
//   - find the shortest paths between all HPA nodes in the graph
//   - shortest paths connecting two horizontal pod autoscalers are used to create markers for the graph
func FindOverlappingHPAs(graph osgraph.Graph, namer osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	nodeFilter := osgraph.NodesOfKind(
		kubenodes.HorizontalPodAutoscalerNodeKind,
		kubenodes.ReplicationControllerNodeKind,
		deploynodes.DeploymentConfigNodeKind,
	)
	edgeFilter := osgraph.EdgesOfKind(
		kubegraph.ScalingEdgeKind,
		deploygraph.DeploymentEdgeKind,
		kubeedges.ManagedByControllerEdgeKind,
	)

	hpaSubGraph := graph.Subgraph(nodeFilter, edgeFilter)
	for _, edge := range hpaSubGraph.Edges() {
		osgraph.AddReversedEdge(hpaSubGraph, edge.From(), edge.To(), sets.NewString())
	}

	hpaNodes := hpaSubGraph.NodesByKind(kubenodes.HorizontalPodAutoscalerNodeKind)

	for _, firstHPA := range hpaNodes {
		// we can use Dijkstra's algorithm as we know we do not have any negative edge weights
		shortestPaths := path.DijkstraFrom(firstHPA, hpaSubGraph)

		for _, secondHPA := range hpaNodes {
			if firstHPA == secondHPA {
				continue
			}

			shortestPath, _ := shortestPaths.To(secondHPA)
			if shortestPath == nil {
				// if two HPAs have no path between them, no error exists
				continue
			}

			markers = append(markers, osgraph.Marker{
				Node:         firstHPA,
				Severity:     osgraph.WarningSeverity,
				RelatedNodes: shortestPath[1:],
				Key:          HPAOverlappingScaleRefWarning,
				Message: fmt.Sprintf("%s and %s overlap because they both attempt to scale %s",
					namer.ResourceName(firstHPA), namer.ResourceName(secondHPA), nameList(shortestPath[1:len(shortestPath)-1], namer)),
			})
		}
	}

	return markers
}
Example 10: humanReadableOutput
// humanReadableOutput traverses the provided graph using DFS and outputs it
// in a human-readable format. It starts from the provided root, assuming it
// is an imageStreamTag node and continues to the rest of the graph handling
// only imageStreamTag and buildConfig nodes.
func (d *ChainDescriber) humanReadableOutput(g osgraph.Graph, f osgraph.Namer, root graph.Node, reverse bool) string {
	if reverse {
		g = g.EdgeSubgraph(osgraph.ReverseExistingDirectEdge)
	}

	var singleNamespace bool
	if len(d.namespaces) == 1 && !d.namespaces.Has(kapi.NamespaceAll) {
		singleNamespace = true
	}

	depth := map[graph.Node]int{
		root: 0,
	}
	out := ""

	dfs := &DepthFirst{
		Visit: func(u, v graph.Node) {
			depth[v] = depth[u] + 1
		},
	}

	until := func(node graph.Node) bool {
		var info string

		switch t := node.(type) {
		case *imagegraph.ImageStreamTagNode:
			info = outputHelper(f.ResourceName(t), t.Namespace, singleNamespace)
		case *buildgraph.BuildConfigNode:
			info = outputHelper(f.ResourceName(t), t.BuildConfig.Namespace, singleNamespace)
		default:
			panic("this graph contains node kinds other than imageStreamTags and buildConfigs")
		}

		if depth[node] != 0 {
			out += "\n"
		}
		out += fmt.Sprintf("%s", strings.Repeat("\t", depth[node]))
		out += fmt.Sprintf("%s", info)

		return false
	}

	dfs.Walk(g, root, until)

	return out
}
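For intuition, the returned string is a tab-indented tree, one node per line, indented by its DFS depth. A hypothetical build chain (the resource names here are invented, shown the way a Namer would render them) might print as:

istag/ruby-sample:latest
	bc/ruby-sample-build
		istag/origin-ruby-sample:latest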
Example 11: nameList
// nameList outputs a nicely-formatted list of names:
//   - given nodes ['a', 'b', 'c'], this will return "one of a, b, or c"
//   - given nodes ['a', 'b'], this will return "a or b"
//   - given nodes ['a'], this will return "a"
func nameList(nodes []graphapi.Node, namer osgraph.Namer) string {
	names := []string{}
	for _, node := range nodes {
		names = append(names, namer.ResourceName(node))
	}

	switch len(names) {
	case 0:
		return ""
	case 1:
		return names[0]
	case 2:
		return names[0] + " or " + names[1]
	default:
		return "one of " + strings.Join(names[:len(names)-1], ", ") + ", or " + names[len(names)-1]
	}
}
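A quick illustrative check of the three interesting branches, written as if it lived in the same package (nameList is unexported). stubNode and letterNamer are hypothetical helpers, and the sketch assumes osgraph.Namer is the single-method interface used throughout these examples; in real use the names come from a real Namer.

// stubNode satisfies graphapi.Node for the sketch.
type stubNode int

func (n stubNode) ID() int { return int(n) }

// letterNamer names node i as "a", "b", "c", ... — illustration only.
type letterNamer struct{}

func (letterNamer) ResourceName(n graphapi.Node) string {
	return string(rune('a' + n.ID()))
}

func demo() {
	f := letterNamer{}
	fmt.Println(nameList([]graphapi.Node{stubNode(0)}, f))                           // "a"
	fmt.Println(nameList([]graphapi.Node{stubNode(0), stubNode(1)}, f))              // "a or b"
	fmt.Println(nameList([]graphapi.Node{stubNode(0), stubNode(1), stubNode(2)}, f)) // "one of a, b, or c"
}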
Example 12: FindHPASpecsMissingCPUTargets
// FindHPASpecsMissingCPUTargets scans the graph in search of HorizontalPodAutoscalers that are missing a CPU utilization target.
// As of right now, the only metric that HPAs can use to scale pods is CPU utilization, so an HPA that is missing this target
// is effectively useless.
func FindHPASpecsMissingCPUTargets(graph osgraph.Graph, namer osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastNode := range graph.NodesByKind(kubenodes.HorizontalPodAutoscalerNodeKind) {
		node := uncastNode.(*kubenodes.HorizontalPodAutoscalerNode)

		if node.HorizontalPodAutoscaler.Spec.CPUUtilization == nil {
			markers = append(markers, osgraph.Marker{
				Node:       node,
				Severity:   osgraph.ErrorSeverity,
				Key:        HPAMissingCPUTargetError,
				Message:    fmt.Sprintf("%s is missing a CPU utilization target", namer.ResourceName(node)),
				Suggestion: osgraph.Suggestion(fmt.Sprintf(`oc patch %s -p '{"spec":{"cpuUtilization":{"targetPercentage": 80}}}'`, namer.ResourceName(node))),
			})
		}
	}

	return markers
}
Example 13: pvcMarker
func pvcMarker(g osgraph.Graph, f osgraph.Namer, dcNode *deploygraph.DeploymentConfigNode) *osgraph.Marker {
	for _, uncastPvcNode := range g.SuccessorNodesByEdgeKind(dcNode, deployedges.VolumeClaimEdgeKind) {
		pvcNode := uncastPvcNode.(*kubegraph.PersistentVolumeClaimNode)

		if !pvcNode.Found() {
			return &osgraph.Marker{
				Node:         dcNode,
				RelatedNodes: []graph.Node{uncastPvcNode},
				Severity:     osgraph.WarningSeverity,
				Key:          MissingPVCWarning,
				Message:      fmt.Sprintf("%s points to a missing persistent volume claim (%s).", f.ResourceName(dcNode), f.ResourceName(pvcNode)),
				// TODO: Suggestion: osgraph.Suggestion(fmt.Sprintf("oc create pvc ...")),
			}
		}

		dc := dcNode.DeploymentConfig
		rollingParams := dc.Spec.Strategy.RollingParams
		isBlockedBySize := dc.Spec.Replicas > 1
		isBlockedRolling := rollingParams != nil && rollingParams.MaxSurge.IntValue() > 0

		// If the claim is not RWO, or deployments will never have more than one pod running at
		// any time, then they should be fine.
		if !hasRWOAccess(pvcNode) || (!isBlockedRolling && !isBlockedBySize) {
			continue
		}

		// This shouldn't be an issue on single-host clusters but they are not the common case anyway.
		// If github.com/kubernetes/kubernetes/issues/26567 ever gets fixed upstream, then we can drop
		// this warning.
		return &osgraph.Marker{
			Node:         dcNode,
			RelatedNodes: []graph.Node{uncastPvcNode},
			Severity:     osgraph.WarningSeverity,
			Key:          SingleHostVolumeWarning,
			Message:      fmt.Sprintf("%s references a volume which may only be used in a single pod at a time - this may lead to hung deployments", f.ResourceName(dcNode)),
		}
	}

	return nil
}
Example 14: FindMissingPortMapping
// FindMissingPortMapping checks all routes and reports those that don't specify a port while
// the service they route to exposes multiple ports. It also reports routes whose backing
// service doesn't exist.
func FindMissingPortMapping(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

route:
	for _, uncastRouteNode := range g.NodesByKind(routegraph.RouteNodeKind) {
		for _, uncastServiceNode := range g.SuccessorNodesByEdgeKind(uncastRouteNode, routeedges.ExposedThroughRouteEdgeKind) {
			routeNode := uncastRouteNode.(*routegraph.RouteNode)
			svcNode := uncastServiceNode.(*kubegraph.ServiceNode)

			if !svcNode.Found() {
				markers = append(markers, osgraph.Marker{
					Node:         routeNode,
					RelatedNodes: []graph.Node{svcNode},
					Severity:     osgraph.WarningSeverity,
					Key:          MissingServiceWarning,
					Message: fmt.Sprintf("%s is supposed to route traffic to %s but %s doesn't exist.",
						f.ResourceName(routeNode), f.ResourceName(svcNode), f.ResourceName(svcNode)),
				})
				continue route
			}

			if len(svcNode.Spec.Ports) > 1 && (routeNode.Spec.Port == nil || len(routeNode.Spec.Port.TargetPort.String()) == 0) {
				markers = append(markers, osgraph.Marker{
					Node:         routeNode,
					RelatedNodes: []graph.Node{svcNode},
					Severity:     osgraph.WarningSeverity,
					Key:          MissingRoutePortWarning,
					Message: fmt.Sprintf("%s doesn't have a port specified and is routing traffic to %s which uses multiple ports.",
						f.ResourceName(routeNode), f.ResourceName(svcNode)),
				})
				continue route
			}
		}
	}

	return markers
}
Example 15: FindUnmountableSecrets
// FindUnmountableSecrets inspects all PodSpecs for any Secret reference that isn't listed as mountable by the referenced ServiceAccount
func FindUnmountableSecrets(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastPodSpecNode := range g.NodesByKind(kubegraph.PodSpecNodeKind) {
		podSpecNode := uncastPodSpecNode.(*kubegraph.PodSpecNode)
		unmountableSecrets := CheckForUnmountableSecrets(g, podSpecNode)

		topLevelNode := osgraph.GetTopLevelContainerNode(g, podSpecNode)
		topLevelString := f.ResourceName(topLevelNode)

		saString := "MISSING_SA"
		saNodes := g.SuccessorNodesByEdgeKind(podSpecNode, kubeedges.ReferencedServiceAccountEdgeKind)
		if len(saNodes) > 0 {
			saString = f.ResourceName(saNodes[0])
		}

		for _, unmountableSecret := range unmountableSecrets {
			markers = append(markers, osgraph.Marker{
				Node:         podSpecNode,
				RelatedNodes: []graph.Node{unmountableSecret},
				Severity:     osgraph.WarningSeverity,
				Key:          UnmountableSecretWarning,
				Message: fmt.Sprintf("%s is attempting to mount a secret %s disallowed by %s",
					topLevelString, f.ResourceName(unmountableSecret), saString),
			})
		}
	}

	return markers
}