This article collects typical usage examples of the String.List method from the Golang package k8s.io/apimachinery/pkg/util/sets. If you are wondering what String.List does, how to call it, or how it is used in real code, the curated examples below should help. You can also read more about the enclosing type, k8s.io/apimachinery/pkg/util/sets.String.
Fifteen code examples of String.List are shown below, sorted by popularity by default. If you find an example useful, you can vote for it; your feedback helps the site recommend better Golang examples.
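Before the collected examples, here is a minimal standalone sketch (not taken from any of them) of what String.List itself does: it returns the set's contents as a freshly allocated, sorted []string.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	s := sets.NewString("b", "a")
	s.Insert("c")
	fmt.Println(s.List())            // [a b c] -- a sorted copy of the set's contents
	fmt.Println(s.Has("b"), s.Len()) // true 3
}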
Example 1: getPriorityFunctionConfigs
func getPriorityFunctionConfigs(names sets.String, args PluginFactoryArgs) ([]algorithm.PriorityConfig, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()
	configs := []algorithm.PriorityConfig{}
	for _, name := range names.List() {
		factory, ok := priorityFunctionMap[name]
		if !ok {
			return nil, fmt.Errorf("Invalid priority name %s specified - no corresponding function found", name)
		}
		if factory.Function != nil {
			configs = append(configs, algorithm.PriorityConfig{
				Function: factory.Function(args),
				Weight:   factory.Weight,
			})
		} else {
			mapFunction, reduceFunction := factory.MapReduceFunction(args)
			configs = append(configs, algorithm.PriorityConfig{
				Map:    mapFunction,
				Reduce: reduceFunction,
				Weight: factory.Weight,
			})
		}
	}
	return configs, nil
}
Example 2: parseRequirement
func (p *Parser) parseRequirement() (*Requirement, error) {
	key, operator, err := p.parseKeyAndInferOperator()
	if err != nil {
		return nil, err
	}
	if operator == selection.Exists || operator == selection.DoesNotExist { // operator found lookahead set checked
		return NewRequirement(key, operator, []string{})
	}
	operator, err = p.parseOperator()
	if err != nil {
		return nil, err
	}
	var values sets.String
	switch operator {
	case selection.In, selection.NotIn:
		values, err = p.parseValues()
	case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.GreaterThan, selection.LessThan:
		values, err = p.parseExactValue()
	}
	if err != nil {
		return nil, err
	}
	return NewRequirement(key, operator, values.List())
}
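parseRequirement is unexported; it is normally reached through the package's public labels.Parse entry point. A small hedged sketch of that path (the selector string and label values are illustrative, not from the source):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Each comma-separated clause below is handled by parseRequirement.
	selector, err := labels.Parse("env in (prod,dev),tier!=frontend")
	if err != nil {
		panic(err)
	}
	fmt.Println(selector.Matches(labels.Set{"env": "prod", "tier": "backend"})) // true
	fmt.Println(selector.Matches(labels.Set{"env": "test"}))                    // false
}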
Example 3: doTestIndex
// Test public interface
func doTestIndex(t *testing.T, indexer Indexer) {
	mkObj := func(id string, val string) testStoreObject {
		return testStoreObject{id: id, val: val}
	}
	// Test Index
	expected := map[string]sets.String{}
	expected["b"] = sets.NewString("a", "c")
	expected["f"] = sets.NewString("e")
	expected["h"] = sets.NewString("g")
	indexer.Add(mkObj("a", "b"))
	indexer.Add(mkObj("c", "b"))
	indexer.Add(mkObj("e", "f"))
	indexer.Add(mkObj("g", "h"))
	{
		for k, v := range expected {
			found := sets.String{}
			indexResults, err := indexer.Index("by_val", mkObj("", k))
			if err != nil {
				t.Errorf("Unexpected error %v", err)
			}
			for _, item := range indexResults {
				found.Insert(item.(testStoreObject).id)
			}
			items := v.List()
			if !found.HasAll(items...) {
				t.Errorf("missing items, index %s, expected %v but found %v", k, items, found.List())
			}
		}
	}
}
Example 4: getRequirement
func getRequirement(key string, op selection.Operator, vals sets.String, t *testing.T) Requirement {
	req, err := NewRequirement(key, op, vals.List())
	if err != nil {
		t.Errorf("NewRequirement(%v, %v, %v) resulted in error:%v", key, op, vals, err)
		return Requirement{}
	}
	return *req
}
Example 5: copyAndReplace
func copyAndReplace(set sets.String, replaceWhat, replaceWith string) sets.String {
	result := sets.NewString(set.List()...)
	if result.Has(replaceWhat) {
		result.Delete(replaceWhat)
		result.Insert(replaceWith)
	}
	return result
}
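A brief hedged usage sketch (assuming the copyAndReplace above is in scope, along with fmt and the sets package): because the set is copied via sets.NewString(set.List()...) before anything is changed, the caller's set is left untouched.

	orig := sets.NewString("a", "b")
	out := copyAndReplace(orig, "b", "c")
	fmt.Println(orig.List()) // [a b]
	fmt.Println(out.List())  // [a c]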
Example 6: runResourceTrackingTest
func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames sets.String, rm *framework.ResourceMonitor,
	expectedCPU map[string]map[float64]float64, expectedMemory framework.ResourceUsagePerContainer) {
	numNodes := nodeNames.Len()
	totalPods := podsPerNode * numNodes
	By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
	rcName := fmt.Sprintf("resource%d-%s", totalPods, string(uuid.NewUUID()))
	// TODO: Use a more realistic workload
	Expect(framework.RunRC(testutils.RCConfig{
		Client:         f.ClientSet,
		InternalClient: f.InternalClientset,
		Name:           rcName,
		Namespace:      f.Namespace.Name,
		Image:          framework.GetPauseImageName(f.ClientSet),
		Replicas:       totalPods,
	})).NotTo(HaveOccurred())
	// Log once and flush the stats.
	rm.LogLatest()
	rm.Reset()
	By("Start monitoring resource usage")
	// Periodically dump the cpu summary until the deadline is met.
	// Note that without calling framework.ResourceMonitor.Reset(), the stats
	// would occupy increasingly more memory. This should be fine
	// for the current test duration, but we should reclaim the
	// entries if we plan to monitor longer (e.g., 8 hours).
	deadline := time.Now().Add(monitoringTime)
	for time.Now().Before(deadline) {
		timeLeft := deadline.Sub(time.Now())
		framework.Logf("Still running...%v left", timeLeft)
		if timeLeft < reportingPeriod {
			time.Sleep(timeLeft)
		} else {
			time.Sleep(reportingPeriod)
		}
		logPodsOnNodes(f.ClientSet, nodeNames.List())
	}
	By("Reporting overall resource usage")
	logPodsOnNodes(f.ClientSet, nodeNames.List())
	usageSummary, err := rm.GetLatest()
	Expect(err).NotTo(HaveOccurred())
	// TODO(random-liu): Remove the original log when we migrate to new perfdash
	framework.Logf("%s", rm.FormatResourceUsage(usageSummary))
	// Log perf result
	framework.PrintPerfData(framework.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
	verifyMemoryLimits(f.ClientSet, expectedMemory, usageSummary)
	cpuSummary := rm.GetCPUSummary()
	framework.Logf("%s", rm.FormatCPUSummary(cpuSummary))
	// Log perf result
	framework.PrintPerfData(framework.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
	verifyCPULimits(expectedCPU, cpuSummary)
	By("Deleting the RC")
	framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
}
Example 7: ExampleNewInformer
func ExampleNewInformer() {
	// source simulates an apiserver object endpoint.
	source := fcache.NewFakeControllerSource()
	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)
	// Make a controller that immediately deletes anything added to it, and
	// logs anything deleted.
	_, controller := NewInformer(
		source,
		&v1.Pod{},
		time.Millisecond*100,
		ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				source.Delete(obj.(runtime.Object))
			},
			DeleteFunc: func(obj interface{}) {
				key, err := DeletionHandlingMetaNamespaceKeyFunc(obj)
				if err != nil {
					key = "oops something went wrong with the key"
				}
				// Report this deletion.
				deletionCounter <- key
			},
		},
	)
	// Run the controller until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go controller.Run(stop)
	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid-- the fake source doesn't
		// call validation or anything.
		source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: name}})
	}
	// Let's wait for the controller to process the things we just added.
	outputSet := sets.String{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}
	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}
Example 8: removeUserVisiblePaths
// removeUserVisiblePaths removes the set of paths from the user-visible
// portion of the writer's target directory.
func (w *AtomicWriter) removeUserVisiblePaths(paths sets.String) error {
	orderedPaths := paths.List()
	// List() is sorted ascending; walking it in reverse removes longer
	// (nested) paths before their parents.
	for ii := len(orderedPaths) - 1; ii >= 0; ii-- {
		if err := os.Remove(path.Join(w.targetDir, orderedPaths[ii])); err != nil {
			glog.Errorf("%s: error pruning old user-visible path %s: %v", w.logContext, orderedPaths[ii], err)
			return err
		}
	}
	return nil
}
Example 9: RESTMapper
// RESTMapper returns a union RESTMapper of all known types with priorities chosen in the following order:
//  1. if KUBE_API_VERSIONS is specified, then KUBE_API_VERSIONS in order, OR
//  1. legacy kube group preferred version, extensions preferred version, metrics preferred version, legacy
//     kube any version, extensions any version, metrics any version, all other groups alphabetical preferred version,
//     all other groups alphabetical.
func (m *APIRegistrationManager) RESTMapper(versionPatterns ...schema.GroupVersion) meta.RESTMapper {
	unionMapper := meta.MultiRESTMapper{}
	unionedGroups := sets.NewString()
	for enabledVersion := range m.enabledVersions {
		if !unionedGroups.Has(enabledVersion.Group) {
			unionedGroups.Insert(enabledVersion.Group)
			groupMeta := m.groupMetaMap[enabledVersion.Group]
			unionMapper = append(unionMapper, groupMeta.RESTMapper)
		}
	}
	if len(versionPatterns) != 0 {
		resourcePriority := []schema.GroupVersionResource{}
		kindPriority := []schema.GroupVersionKind{}
		for _, versionPriority := range versionPatterns {
			resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource))
			kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind))
		}
		return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority}
	}
	if len(m.envRequestedVersions) != 0 {
		resourcePriority := []schema.GroupVersionResource{}
		kindPriority := []schema.GroupVersionKind{}
		for _, versionPriority := range m.envRequestedVersions {
			resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource))
			kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind))
		}
		return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority}
	}
	prioritizedGroups := []string{"", "extensions", "metrics"}
	resourcePriority, kindPriority := m.prioritiesForGroups(prioritizedGroups...)
	prioritizedGroupsSet := sets.NewString(prioritizedGroups...)
	remainingGroups := sets.String{}
	for enabledVersion := range m.enabledVersions {
		if !prioritizedGroupsSet.Has(enabledVersion.Group) {
			remainingGroups.Insert(enabledVersion.Group)
		}
	}
	remainingResourcePriority, remainingKindPriority := m.prioritiesForGroups(remainingGroups.List()...)
	resourcePriority = append(resourcePriority, remainingResourcePriority...)
	kindPriority = append(kindPriority, remainingKindPriority...)
	return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority}
}
Example 10: getFitPredicateFunctions
func getFitPredicateFunctions(names sets.String, args PluginFactoryArgs) (map[string]algorithm.FitPredicate, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()
	predicates := map[string]algorithm.FitPredicate{}
	for _, name := range names.List() {
		factory, ok := fitPredicateMap[name]
		if !ok {
			return nil, fmt.Errorf("Invalid predicate name %q specified - no corresponding function found", name)
		}
		predicates[name] = factory(args)
	}
	return predicates, nil
}
Example 11: ChooseZoneForVolume
// ChooseZoneForVolume implements our heuristics for choosing a zone for volume creation based on the volume name
// Volumes are generally round-robin-ed across all active zones, using the hash of the PVC Name.
// However, if the PVCName ends with `-<integer>`, we will hash the prefix, and then add the integer to the hash.
// This means that a StatefulSet's volumes (`claimname-statefulsetname-id`) will spread across available zones,
// assuming the id values are consecutive.
func ChooseZoneForVolume(zones sets.String, pvcName string) string {
	// We create the volume in a zone determined by the name
	// Eventually the scheduler will coordinate placement into an available zone
	var hash uint32
	var index uint32
	if pvcName == "" {
		// We should always be called with a name; this shouldn't happen
		glog.Warningf("No name defined during volume create; choosing random zone")
		hash = rand.Uint32()
	} else {
		hashString := pvcName
		// Heuristic to make sure that volumes in a StatefulSet are spread across zones
		// StatefulSet PVCs are (currently) named ClaimName-StatefulSetName-Id,
		// where Id is an integer index
		lastDash := strings.LastIndexByte(pvcName, '-')
		if lastDash != -1 {
			petIDString := pvcName[lastDash+1:]
			petID, err := strconv.ParseUint(petIDString, 10, 32)
			if err == nil {
				// Offset by the pet id, so we round-robin across zones
				index = uint32(petID)
				// We still hash the volume name, but only the base
				hashString = pvcName[:lastDash]
				glog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index)
			}
		}
		// We hash the (base) volume name, so we don't bias towards the first N zones
		h := fnv.New32()
		h.Write([]byte(hashString))
		hash = h.Sum32()
	}
	// Zones.List returns zones in a consistent order (sorted)
	// We do have a potential failure case where volumes will not be properly spread,
	// if the set of zones changes during StatefulSet volume creation. However, this is
	// probably relatively unlikely because we expect the set of zones to be essentially
	// static for clusters.
	// Hopefully we can address this problem if/when we do full scheduler integration of
	// PVC placement (which could also e.g. avoid putting volumes in overloaded or
	// unhealthy zones)
	zoneSlice := zones.List()
	zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))]
	glog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice)
	return zone
}
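To make the heuristic above concrete, here is a minimal standalone sketch (zone names and PVC names are illustrative, not from the source) that reproduces the same FNV-32 hash plus ordinal offset, so consecutive StatefulSet claims land in consecutive zones of the sorted zone list:

package main

import (
	"fmt"
	"hash/fnv"

	"k8s.io/apimachinery/pkg/util/sets"
)

// pickZone is a hypothetical helper mirroring ChooseZoneForVolume's core logic.
func pickZone(zones sets.String, base string, ordinal uint32) string {
	h := fnv.New32()
	h.Write([]byte(base))     // hash only the claim-name prefix
	zoneSlice := zones.List() // sorted, so the mapping is stable
	return zoneSlice[(h.Sum32()+ordinal)%uint32(len(zoneSlice))]
}

func main() {
	zones := sets.NewString("zone-a", "zone-b", "zone-c")
	for i := uint32(0); i < 3; i++ {
		// e.g. PVCs named www-web-0, www-web-1, www-web-2
		fmt.Printf("www-web-%d -> %s\n", i, pickZone(zones, "www-web", i))
	}
}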
Example 12: syncResourceList
func (t *ThirdPartyController) syncResourceList(list runtime.Object) error {
	existing := sets.String{}
	switch list := list.(type) {
	case *extensions.ThirdPartyResourceList:
		// Loop across all schema objects for third party resources
		for ix := range list.Items {
			item := &list.Items[ix]
			// extract the api group and resource kind from the schema
			_, group, err := thirdpartyresourcedata.ExtractApiGroupAndKind(item)
			if err != nil {
				return err
			}
			// place it in the set of resources that we expect, so that we don't delete it in the delete pass
			existing.Insert(MakeThirdPartyPath(group))
			// ensure a RESTful resource for this schema exists on the master
			if err := t.SyncOneResource(item); err != nil {
				return err
			}
		}
	default:
		return fmt.Errorf("expected a *ThirdPartyResourceList, got %#v", list)
	}
	// deletion phase, get all installed RESTful resources
	installed := t.master.ListThirdPartyResources()
	for _, installedAPI := range installed {
		found := false
		// search across the expected restful resources to see if this resource belongs to one of the expected ones
		for _, apiPath := range existing.List() {
			if installedAPI == apiPath || strings.HasPrefix(installedAPI, apiPath+"/") {
				found = true
				break
			}
		}
		// not expected, delete the resource
		if !found {
			if err := t.master.RemoveThirdPartyResource(installedAPI); err != nil {
				return err
			}
		}
	}
	return nil
}
Example 13: waitTillNPodsRunningOnNodes
// waitTillNPodsRunningOnNodes polls the /runningpods endpoint on kubelet until
// it finds targetNumPods pods that match the given criteria (namespace and
// podNamePrefix). Note that we usually use label selector to filter pods that
// belong to the same RC. However, we use podNamePrefix with namespace here
// because pods returned from /runningpods do not contain the original label
// information; they are reconstructed by examining the container runtime. In
// the scope of this test, we do not expect pod naming conflicts so
// podNamePrefix should be sufficient to identify the pods.
func waitTillNPodsRunningOnNodes(c clientset.Interface, nodeNames sets.String, podNamePrefix string, namespace string, targetNumPods int, timeout time.Duration) error {
	return wait.Poll(pollInterval, timeout, func() (bool, error) {
		matchCh := make(chan sets.String, len(nodeNames))
		for _, item := range nodeNames.List() {
			// Launch a goroutine per node to check the pods running on the nodes.
			nodeName := item
			go func() {
				matchCh <- getPodMatches(c, nodeName, podNamePrefix, namespace)
			}()
		}
		seen := sets.NewString()
		for i := 0; i < len(nodeNames.List()); i++ {
			seen = seen.Union(<-matchCh)
		}
		if seen.Len() == targetNumPods {
			return true, nil
		}
		framework.Logf("Waiting for %d pods to be running on the node; %d are currently running;", targetNumPods, seen.Len())
		return false, nil
	})
}
Example 14: Object
// Object returns a single object representing the output of a single visit to all
// found resources. If the Builder was a singular context (expected to return a
// single resource by user input) and only a single resource was found, the resource
// will be returned as is. Otherwise, the returned resources will be part of an
// api.List. The ResourceVersion of the api.List will be set only if it is identical
// across all infos returned.
func (r *Result) Object() (runtime.Object, error) {
	infos, err := r.Infos()
	if err != nil {
		return nil, err
	}
	versions := sets.String{}
	objects := []runtime.Object{}
	for _, info := range infos {
		if info.Object != nil {
			objects = append(objects, info.Object)
			versions.Insert(info.ResourceVersion)
		}
	}
	if len(objects) == 1 {
		if r.singleItemImplied {
			return objects[0], nil
		}
		// if the item is a list already, don't create another list
		if meta.IsListType(objects[0]) {
			return objects[0], nil
		}
	}
	version := ""
	if len(versions) == 1 {
		version = versions.List()[0]
	}
	return &api.List{
		ListMeta: metav1.ListMeta{
			ResourceVersion: version,
		},
		Items: objects,
	}, err
}
Example 15: Example
func Example() {
	// source simulates an apiserver object endpoint.
	source := fcache.NewFakeControllerSource()
	// This will hold the downstream state, as we know it.
	downstream := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
	// This will hold incoming changes. Note how we pass downstream in as a
	// KeyLister, that way resync operations will result in the correct set
	// of update/delete deltas.
	fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, downstream)
	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)
	cfg := &Config{
		Queue:            fifo,
		ListerWatcher:    source,
		ObjectType:       &v1.Pod{},
		FullResyncPeriod: time.Millisecond * 100,
		RetryOnError:     false,
		// Let's implement a simple controller that just deletes
		// everything that comes in.
		Process: func(obj interface{}) error {
			// Obj is from the Pop method of the Queue we make above.
			newest := obj.(Deltas).Newest()
			if newest.Type != Deleted {
				// Update our downstream store.
				err := downstream.Add(newest.Object)
				if err != nil {
					return err
				}
				// Delete this object.
				source.Delete(newest.Object.(runtime.Object))
			} else {
				// Update our downstream store.
				err := downstream.Delete(newest.Object)
				if err != nil {
					return err
				}
				// fifo's KeyOf is easiest, because it handles
				// DeletedFinalStateUnknown markers.
				key, err := fifo.KeyOf(newest.Object)
				if err != nil {
					return err
				}
				// Report this deletion.
				deletionCounter <- key
			}
			return nil
		},
	}
	// Create the controller and run it until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go New(cfg).Run(stop)
	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid-- the fake source doesn't
		// call validation or anything.
		source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: name}})
	}
	// Let's wait for the controller to process the things we just added.
	outputSet := sets.String{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}
	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}