This article collects typical usage examples of the Golang function github.com/GoogleCloudPlatform/kubernetes/pkg/labels.SelectorFromSet. If you have been wondering what exactly SelectorFromSet does, how to call it, and what real-world uses look like, the curated examples below should help.
The sections below show 15 code examples of SelectorFromSet, drawn from open source projects and ordered by popularity by default.
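Before the examples, a quick orientation: SelectorFromSet converts a labels.Set (a map[string]string) into a Selector whose Matches method reports whether a given label set carries every key/value pair in the selector. Here is a minimal sketch of that contract, assuming the old github.com/GoogleCloudPlatform/kubernetes import path that these examples use:

package main

import (
    "fmt"

    "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
)

func main() {
    // Build a selector requiring env=prod AND tier=frontend.
    selector := labels.SelectorFromSet(labels.Set{"env": "prod", "tier": "frontend"})

    // A label set matches only if it contains every pair in the selector;
    // extra labels on the target are allowed.
    fmt.Println(selector.Matches(labels.Set{"env": "prod", "tier": "frontend", "app": "web"})) // true
    fmt.Println(selector.Matches(labels.Set{"env": "prod"}))                                   // false: tier is missing
}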
Example 1: getOverlappingControllers
// getOverlappingControllers finds rcs that this controller overlaps, as well as rcs overlapping this controller.
func getOverlappingControllers(c client.ReplicationControllerInterface, rc *api.ReplicationController) ([]api.ReplicationController, error) {
    rcs, err := c.List(labels.Everything())
    if err != nil {
        return nil, fmt.Errorf("error getting replication controllers: %v", err)
    }
    var matchingRCs []api.ReplicationController
    rcLabels := labels.Set(rc.Spec.Selector)
    for _, controller := range rcs.Items {
        newRCLabels := labels.Set(controller.Spec.Selector)
        if labels.SelectorFromSet(newRCLabels).Matches(rcLabels) || labels.SelectorFromSet(rcLabels).Matches(newRCLabels) {
            matchingRCs = append(matchingRCs, controller)
        }
    }
    return matchingRCs, nil
}
Example 2: PodMatchesNodeLabels
func PodMatchesNodeLabels(pod *api.Pod, node *api.Node) bool {
    if len(pod.Spec.NodeSelector) == 0 {
        return true
    }
    selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
    return selector.Matches(labels.Set(node.Labels))
}
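A hypothetical invocation of this helper, sketched using only the fields the function actually reads (pod.Spec.NodeSelector and node.Labels); the surrounding object construction is an assumption for this old API version:

var pod api.Pod
pod.Spec.NodeSelector = map[string]string{"disk": "ssd"}

var node api.Node
node.Labels = map[string]string{"disk": "ssd", "zone": "a"}

// true: the node carries every label demanded by the pod's nodeSelector.
matches := PodMatchesNodeLabels(&pod, &node)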
Example 3: getReplicationControllersForLabels
// Get all replication controllers whose selectors would match a given set of
// labels.
// TODO Move this to pkg/client and ideally implement it server-side (instead
// of getting all RC's and searching through them manually).
func getReplicationControllersForLabels(c client.ReplicationControllerInterface, labelsToMatch labels.Labels) string {
    // Get all replication controllers.
    // TODO this needs a namespace scope as argument
    rcs, err := c.List(labels.Everything())
    if err != nil {
        glog.Fatalf("Error getting replication controllers: %v\n", err)
    }
    // Find the ones that match labelsToMatch.
    var matchingRCs []api.ReplicationController
    for _, controller := range rcs.Items {
        selector := labels.SelectorFromSet(controller.Spec.Selector)
        if selector.Matches(labelsToMatch) {
            matchingRCs = append(matchingRCs, controller)
        }
    }
    // Format the matching RC's into strings.
    var rcStrings []string
    for _, controller := range matchingRCs {
        rcStrings = append(rcStrings, fmt.Sprintf("%s (%d/%d replicas created)", controller.Name, controller.Status.Replicas, controller.Spec.Replicas))
    }
    list := strings.Join(rcStrings, ", ")
    if list == "" {
        return "<none>"
    }
    return list
}
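For instance, with two matching controllers the returned description reads like "frontend (3/3 replicas created), backend (2/2 replicas created)"; when no controller's selector matches, the caller gets the literal "<none>".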
Example 4: TestEtcdWatchEndpoints
func TestEtcdWatchEndpoints(t *testing.T) {
    fakeClient := tools.NewFakeEtcdClient(t)
    registry := NewTestEtcdRegistry(fakeClient)
    watching, err := registry.WatchEndpoints(
        labels.Everything(),
        labels.SelectorFromSet(labels.Set{"ID": "foo"}),
        1,
    )
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    fakeClient.WaitForWatchCompletion()
    select {
    case _, ok := <-watching.ResultChan():
        if !ok {
            t.Errorf("watching channel should be open")
        }
    default:
    }
    fakeClient.WatchInjectError <- nil
    if _, ok := <-watching.ResultChan(); ok {
        t.Errorf("watching channel should be closed")
    }
    watching.Stop()
}
Example 5: PodSelectorMatches
func (n *NodeSelector) PodSelectorMatches(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
    if len(pod.Spec.NodeSelector) == 0 {
        return true, nil
    }
    // check whitelist
    if whitelist, exists := pod.Spec.NodeSelector["whitelist"]; exists {
        for _, hostIP := range strings.Split(whitelist, ",") {
            if hostIP == node {
                return true, nil
            }
        }
        return false, nil
    }
    selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
    minion, err := n.info.GetNodeInfo(node)
    if err != nil {
        return false, err
    }
    // check blacklist and model
    active := true
    if val, exists := minion.Labels["active"]; exists {
        if val == "false" {
            active = false
        }
    }
    if _, e1 := pod.Spec.NodeSelector["sriov"]; !e1 {
        if sriov, e2 := minion.Labels["sriov"]; e2 && sriov == "1" {
            return false, nil
        }
    }
    return selector.Matches(labels.Set(minion.Labels)) && active, nil
}
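Note the ad-hoc conventions this variant layers on top of the plain selector match: a comma-separated "whitelist" entry in the nodeSelector short-circuits everything else, a minion labeled active=false can never match, and minions labeled sriov=1 are held back for pods that explicitly ask for sriov.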
Example 6: CalculateSpreadPriority
// CalculateSpreadPriority spreads pods by minimizing the number of pods on the same machine with the same labels.
// Importantly, if there are services in the system that span multiple heterogeneous sets of pods, this spreading priority
// may not provide optimal spreading for the members of that Service.
// TODO: consider if we want to include Service label sets in the scheduling priority.
func CalculateSpreadPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
    pods, err := podLister.List(labels.SelectorFromSet(pod.Labels))
    if err != nil {
        return nil, err
    }
    minions, err := minionLister.List()
    if err != nil {
        return nil, err
    }
    var maxCount int
    var fScore float32 = 10.0
    counts := map[string]int{}
    if len(pods) > 0 {
        for _, pod := range pods {
            counts[pod.Status.Host]++
            // Compute the maximum number of pods hosted on any minion
            if counts[pod.Status.Host] > maxCount {
                maxCount = counts[pod.Status.Host]
            }
        }
    }
    result := []HostPriority{}
    // score is on a scale of 0-10, 0 being the lowest and 10 the highest priority
    for _, minion := range minions.Items {
        if maxCount > 0 {
            fScore = 10 * (float32(maxCount-counts[minion.Name]) / float32(maxCount))
        }
        result = append(result, HostPriority{host: minion.Name, score: int(fScore)})
    }
    return result, nil
}
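To make the scoring concrete: if the busiest minion hosts maxCount = 4 pods with the same labels, then a minion hosting 1 of them scores int(10 * (4-1)/4) = 7, the busiest minion scores 0, and an empty minion scores 10, so the scheduler prefers emptier machines.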
Example 7: TestEtcdListImagesFiltered
func TestEtcdListImagesFiltered(t *testing.T) {
    fakeClient := tools.NewFakeEtcdClient(t)
    key := "/images"
    fakeClient.Data[key] = tools.EtcdResponseWithError{
        R: &etcd.Response{
            Node: &etcd.Node{
                Nodes: []*etcd.Node{
                    {
                        Value: runtime.EncodeOrDie(api.Image{
                            JSONBase: kubeapi.JSONBase{ID: "foo"},
                            Labels:   map[string]string{"env": "prod"},
                        }),
                    },
                    {
                        Value: runtime.EncodeOrDie(api.Image{
                            JSONBase: kubeapi.JSONBase{ID: "bar"},
                            Labels:   map[string]string{"env": "dev"},
                        }),
                    },
                },
            },
        },
        E: nil,
    }
    registry := NewTestEtcdRegistry(fakeClient)
    images, err := registry.ListImages(labels.SelectorFromSet(labels.Set{"env": "dev"}))
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    if len(images.Items) != 1 || images.Items[0].ID != "bar" {
        t.Errorf("Unexpected images list: %#v", images)
    }
}
Example 8: podsResponding
func podsResponding(c *client.Client, ns, name string, wantName bool, pods *api.PodList) error {
By("trying to dial each unique pod")
retryTimeout := 2 * time.Minute
retryInterval := 5 * time.Second
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
return wait.Poll(retryInterval, retryTimeout, podResponseChecker{c, ns, label, name, wantName, pods}.checkAllResponses)
}
Example 9: TestEtcdWatchNodesNotMatch
func TestEtcdWatchNodesNotMatch(t *testing.T) {
    ctx := api.NewDefaultContext()
    storage, fakeClient := newStorage(t)
    node := validNewNode()
    watching, err := storage.Watch(ctx,
        labels.SelectorFromSet(labels.Set{"name": "bar"}),
        fields.Everything(),
        "1",
    )
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    fakeClient.WaitForWatchCompletion()
    nodeBytes, _ := latest.Codec.Encode(node)
    fakeClient.WatchResponse <- &etcd.Response{
        Action: "create",
        Node: &etcd.Node{
            Value: string(nodeBytes),
        },
    }
    select {
    case <-watching.ResultChan():
        t.Error("unexpected result from result channel")
    case <-time.After(time.Millisecond * 100):
        // expected case
    }
}
Example 10: CoverServices
// CoverServices ensures that a directed edge exists between all deployment configs and the
// services that expose them (via label selectors).
func CoverServices(g Graph) Graph {
    nodes := g.NodeList()
    for _, node := range nodes {
        switch svc := node.(type) {
        case *ServiceNode:
            if svc.Service.Spec.Selector == nil {
                continue
            }
            query := labels.SelectorFromSet(svc.Service.Spec.Selector)
            for _, n := range nodes {
                switch target := n.(type) {
                case *DeploymentConfigNode:
                    template := target.DeploymentConfig.Template.ControllerTemplate.Template
                    if template == nil {
                        continue
                    }
                    if query.Matches(labels.Set(template.Labels)) {
                        g.AddEdge(target, svc, ExposedThroughServiceGraphEdgeKind)
                    }
                }
            }
        }
    }
    return g
}
Example 11: CalculateAntiAffinityPriority
// CalculateAntiAffinityPriority spreads pods by minimizing the number of pods belonging to the same service
// on machines with the same value for a particular label.
// The label to be considered is provided to the struct (ServiceAntiAffinity).
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
    var pods []api.Pod
    services, err := s.serviceLister.GetPodServices(pod)
    if err == nil {
        // just use the first service and get the other pods within the service
        // TODO: a separate predicate can be created that tries to handle all services for the pod
        selector := labels.SelectorFromSet(services[0].Spec.Selector)
        pods, err = podLister.List(selector)
        if err != nil {
            return nil, err
        }
    }
    minions, err := minionLister.List()
    if err != nil {
        return nil, err
    }
    // separate out the minions that have the label from the ones that don't
    otherMinions := []string{}
    labeledMinions := map[string]string{}
    for _, minion := range minions.Items {
        if labels.Set(minion.Labels).Has(s.label) {
            label := labels.Set(minion.Labels).Get(s.label)
            labeledMinions[minion.Name] = label
        } else {
            otherMinions = append(otherMinions, minion.Name)
        }
    }
    podCounts := map[string]int{}
    for _, pod := range pods {
        label, exists := labeledMinions[pod.Status.Host]
        if !exists {
            continue
        }
        podCounts[label]++
    }
    numServicePods := len(pods)
    result := []HostPriority{}
    // score is on a scale of 0-10, 0 being the lowest and 10 the highest priority
    for minion := range labeledMinions {
        // initializing to the default/max minion score of 10
        fScore := float32(10)
        if numServicePods > 0 {
            fScore = 10 * (float32(numServicePods-podCounts[labeledMinions[minion]]) / float32(numServicePods))
        }
        result = append(result, HostPriority{host: minion, score: int(fScore)})
    }
    // add the open minions with a score of 0
    for _, minion := range otherMinions {
        result = append(result, HostPriority{host: minion, score: 0})
    }
    return result, nil
}
Example 12: TestPodUpdate
func TestPodUpdate(c *client.Client) bool {
    podClient := c.Pods(api.NamespaceDefault)
    pod := loadPodOrDie(assetPath("api", "examples", "pod.json"))
    value := strconv.Itoa(time.Now().Nanosecond())
    pod.Labels["time"] = value
    _, err := podClient.Create(pod)
    if err != nil {
        glog.Errorf("Failed to create pod: %v", err)
        return false
    }
    defer podClient.Delete(pod.Name)
    waitForPodRunning(c, pod.Name)
    pods, err := podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})))
    // Check the List error before touching pods.Items; the original ignored it,
    // which risks a nil dereference when the call fails.
    if err != nil {
        glog.Errorf("Failed to list pods: %v", err)
        return false
    }
    if len(pods.Items) != 1 {
        glog.Errorf("Failed to find the correct pod")
        return false
    }
    podOut, err := podClient.Get(pod.Name)
    if err != nil {
        glog.Errorf("Failed to get pod: %v", err)
        return false
    }
    value = "time" + value
    pod.Labels["time"] = value
    pod.ResourceVersion = podOut.ResourceVersion
    pod.UID = podOut.UID
    pod, err = podClient.Update(pod)
    if err != nil {
        glog.Errorf("Failed to update pod: %v", err)
        return false
    }
    waitForPodRunning(c, pod.Name)
    pods, err = podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})))
    if err != nil {
        glog.Errorf("Failed to list pods after update: %v", err)
        return false
    }
    if len(pods.Items) != 1 {
        glog.Errorf("Failed to find the correct pod after update.")
        return false
    }
    glog.Infof("pod update OK")
    return true
}
Example 13: PodSelectorMatches
func (n *NodeSelector) PodSelectorMatches(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
    if len(pod.NodeSelector) == 0 {
        return true, nil
    }
    selector := labels.SelectorFromSet(pod.NodeSelector)
    minion, err := n.info.GetNodeInfo(node)
    if err != nil {
        return false, err
    }
    return selector.Matches(labels.Set(minion.Labels)), nil
}
Example 14: TestEtcdWatchEndpointsBadSelector
func TestEtcdWatchEndpointsBadSelector(t *testing.T) {
    fakeClient := tools.NewFakeEtcdClient(t)
    registry := NewTestEtcdRegistry(fakeClient)
    _, err := registry.WatchEndpoints(
        labels.Everything(),
        labels.SelectorFromSet(labels.Set{"Field.Selector": "foo"}),
        0,
    )
    if err == nil {
        t.Errorf("unexpected non-error: %v", err)
    }
    _, err = registry.WatchEndpoints(
        labels.SelectorFromSet(labels.Set{"Label.Selector": "foo"}),
        labels.Everything(),
        0,
    )
    if err == nil {
        t.Errorf("unexpected non-error: %v", err)
    }
}
Example 15: CalculateSpreadPriority
// CalculateSpreadPriority spreads pods by minimizing the number of pods belonging to the same service
// on the same machine.
func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (api.HostPriorityList, error) {
    var maxCount int
    var nsServicePods []*api.Pod
    services, err := s.serviceLister.GetPodServices(pod)
    if err == nil {
        // just use the first service and get the other pods within the service
        // TODO: a separate predicate can be created that tries to handle all services for the pod
        selector := labels.SelectorFromSet(services[0].Spec.Selector)
        pods, err := podLister.List(selector)
        if err != nil {
            return nil, err
        }
        // consider only the pods that belong to the same namespace
        for _, nsPod := range pods {
            if nsPod.Namespace == pod.Namespace {
                nsServicePods = append(nsServicePods, nsPod)
            }
        }
    }
    minions, err := minionLister.List()
    if err != nil {
        return nil, err
    }
    counts := map[string]int{}
    if len(nsServicePods) > 0 {
        for _, pod := range nsServicePods {
            counts[pod.Spec.NodeName]++
            // Compute the maximum number of pods hosted on any minion
            if counts[pod.Spec.NodeName] > maxCount {
                maxCount = counts[pod.Spec.NodeName]
            }
        }
    }
    result := []api.HostPriority{}
    // score is on a scale of 0-10, 0 being the lowest and 10 the highest priority
    for _, minion := range minions.Items {
        // initializing to the default/max minion score of 10
        fScore := float32(10)
        if maxCount > 0 {
            fScore = 10 * (float32(maxCount-counts[minion.Name]) / float32(maxCount))
        }
        result = append(result, api.HostPriority{Host: minion.Name, Score: int(fScore)})
        glog.V(10).Infof(
            "%v -> %v: ServiceSpreadPriority, Score: (%d)", pod.Name, minion.Name, int(fScore),
        )
    }
    return result, nil
}