This article collects typical usage examples of the Go function NewString from k8s.io/kubernetes/pkg/util/sets. If you are wondering what exactly NewString does, how to call it, or what it looks like in real code, the curated function examples here should help.
Fifteen NewString code examples are shown below, ordered by popularity by default.
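As a quick orientation before the examples (this minimal sketch is added for context and is not one of the collected examples): NewString constructs a sets.String from its arguments, and the resulting set supports Insert, Has, List, Equal, and similar operations. A self-contained sketch against the same package path used throughout the examples:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/sets"
)

func main() {
	s := sets.NewString("a", "b") // seed the set with initial members
	s.Insert("c")                 // add another member
	fmt.Println(s.Has("b"))       // true: membership test
	fmt.Println(s.List())         // [a b c]: List returns a sorted slice
	fmt.Println(s.Equal(sets.NewString("c", "b", "a"))) // true: comparison ignores order
}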
Example 1: buildClientDiagnostics
// buildClientDiagnostics builds client Diagnostic objects based on the rawConfig passed in.
// Returns the Diagnostics built, "ok" bool for whether to proceed or abort, and an error if any was encountered during the building of diagnostics.
func (o DiagnosticsOptions) buildClientDiagnostics(rawConfig *clientcmdapi.Config) ([]types.Diagnostic, bool, error) {
	available := availableClientDiagnostics
	// osClient, kubeClient, clientErr := o.Factory.Clients() // use with a diagnostic that needs OpenShift/Kube client
	_, _, clientErr := o.Factory.Clients()
	if clientErr != nil {
		o.Logger.Notice("CED0001", "Failed creating client from config; client diagnostics will be limited to config testing")
		available = sets.NewString(clientdiags.ConfigContextsName)
	}
	diagnostics := []types.Diagnostic{}
	requestedDiagnostics := intersection(sets.NewString(o.RequestedDiagnostics...), available).List()
	for _, diagnosticName := range requestedDiagnostics {
		switch diagnosticName {
		case clientdiags.ConfigContextsName:
			for contextName := range rawConfig.Contexts {
				diagnostics = append(diagnostics, clientdiags.ConfigContext{rawConfig, contextName})
			}
		default:
			return nil, false, fmt.Errorf("unknown diagnostic: %v", diagnosticName)
		}
	}
	return diagnostics, true, clientErr
}
Example 2: TestProcessItem
// test that the processItem function performs the expected actions.
func TestProcessItem(t *testing.T) {
	pod := newDanglingPod()
	podBytes, err := json.Marshal(pod)
	if err != nil {
		t.Fatal(err)
	}
	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/owner1": {
				404,
				[]byte{},
			},
			"GET" + "/api/v1/namespaces/ns1/pods/ToBeDeletedPod": {
				200,
				podBytes,
			},
		},
	}
	podResource := []unversioned.GroupVersionResource{{Version: "v1", Resource: "pods"}}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()
	clientConfig.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
	metaOnlyClientPool := dynamic.NewClientPool(clientConfig, dynamic.LegacyAPIPathResolverFunc)
	clientConfig.ContentConfig.NegotiatedSerializer = nil
	clientPool := dynamic.NewClientPool(clientConfig, dynamic.LegacyAPIPathResolverFunc)
	gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, podResource)
	if err != nil {
		t.Fatal(err)
	}
	item := &node{
		identity: objectReference{
			OwnerReference: metatypes.OwnerReference{
				Kind:       pod.Kind,
				APIVersion: pod.APIVersion,
				Name:       pod.Name,
				UID:        pod.UID,
			},
			Namespace: pod.Namespace,
		},
		// owners are intentionally left empty. The processItem routine should get the latest item from the server.
		owners: nil,
	}
	err = gc.processItem(item)
	if err != nil {
		t.Errorf("Unexpected Error: %v", err)
	}
	expectedActionSet := sets.NewString()
	expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/replicationcontrollers/owner1")
	expectedActionSet.Insert("DELETE=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")
	expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")
	actualActionSet := sets.NewString()
	for _, action := range testHandler.actions {
		actualActionSet.Insert(action.String())
	}
	if !expectedActionSet.Equal(actualActionSet) {
		t.Errorf("expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet,
			actualActionSet, expectedActionSet.Difference(actualActionSet))
	}
}
Example 3: limitSecretReferences
func (s *serviceAccount) limitSecretReferences(serviceAccount *api.ServiceAccount, pod *api.Pod) error {
	// Ensure all secrets the pod references are allowed by the service account
	mountableSecrets := sets.NewString()
	for _, s := range serviceAccount.Secrets {
		mountableSecrets.Insert(s.Name)
	}
	for _, volume := range pod.Spec.Volumes {
		source := volume.VolumeSource
		if source.Secret == nil {
			continue
		}
		secretName := source.Secret.SecretName
		if !mountableSecrets.Has(secretName) {
			return fmt.Errorf("Volume with secret.secretName=\"%s\" is not allowed because service account %s does not reference that secret", secretName, serviceAccount.Name)
		}
	}
	// limit pull secret references as well
	pullSecrets := sets.NewString()
	for _, s := range serviceAccount.ImagePullSecrets {
		pullSecrets.Insert(s.Name)
	}
	for i, pullSecretRef := range pod.Spec.ImagePullSecrets {
		if !pullSecrets.Has(pullSecretRef.Name) {
			return fmt.Errorf(`imagePullSecrets[%d].name="%s" is not allowed because service account %s does not reference that imagePullSecret`, i, pullSecretRef.Name, serviceAccount.Name)
		}
	}
	return nil
}
Example 4: newRESTMapper
func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper {
	// the list of kinds that are scoped at the root of the api hierarchy
	// if a kind is not enumerated here, it is assumed to have a namespace scope
	rootScoped := sets.NewString(
		"Node",
		"Namespace",
		"PersistentVolume",
		"ComponentStatus",
	)
	// these kinds should be excluded from the list of resources
	ignoredKinds := sets.NewString(
		"ListOptions",
		"DeleteOptions",
		"Status",
		"PodLogOptions",
		"PodExecOptions",
		"PodAttachOptions",
		"PodProxyOptions",
		"ThirdPartyResource",
		"ThirdPartyResourceData",
		"ThirdPartyResourceList")
	mapper := api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
	// setup aliases for groups of resources
	mapper.AddResourceAlias("all", userResources...)
	return mapper
}
Example 5: Sync
// Sync syncs firewall rules with the cloud.
func (fr *FirewallRules) Sync(nodePorts []int64, nodeNames []string) error {
	if len(nodePorts) == 0 {
		return fr.Shutdown()
	}
	// Firewall rule prefix must match that inserted by the gce library.
	suffix := fr.namer.FrSuffix()
	// TODO: Fix upstream gce cloudprovider lib so GET also takes the suffix
	// instead of the whole name.
	name := fr.namer.FrName(suffix)
	rule, _ := fr.cloud.GetFirewall(name)
	if rule == nil {
		glog.Infof("Creating global l7 firewall rule %v", name)
		return fr.cloud.CreateFirewall(suffix, "GCE L7 firewall rule", fr.srcRange, nodePorts, nodeNames)
	}
	requiredPorts := sets.NewString()
	for _, p := range nodePorts {
		requiredPorts.Insert(strconv.Itoa(int(p)))
	}
	existingPorts := sets.NewString()
	for _, allowed := range rule.Allowed {
		for _, p := range allowed.Ports {
			existingPorts.Insert(p)
		}
	}
	if requiredPorts.Equal(existingPorts) {
		return nil
	}
	glog.V(3).Infof("Firewall rule %v already exists, updating nodeports %v", name, nodePorts)
	return fr.cloud.UpdateFirewall(suffix, "GCE L7 firewall rule", fr.srcRange, nodePorts, nodeNames)
}
Example 6: TestAddAfterTry
func TestAddAfterTry(t *testing.T) {
	evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
	evictor.Add("first", "11111")
	evictor.Add("second", "22222")
	evictor.Add("third", "33333")
	evictor.Remove("second")
	deletedMap := sets.NewString()
	evictor.Try(func(value TimedValue) (bool, time.Duration) {
		deletedMap.Insert(value.Value)
		return true, 0
	})
	setPattern := sets.NewString("first", "third")
	if len(deletedMap) != len(setPattern) {
		t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
	}
	if !CheckSetEq(setPattern, deletedMap) {
		t.Errorf("Invalid map. Got %v, expected %v", deletedMap, setPattern)
	}
	evictor.Add("first", "11111")
	evictor.Try(func(value TimedValue) (bool, time.Duration) {
		t.Errorf("We shouldn't process the same value if the explicit remove wasn't called.")
		return true, 0
	})
}
Example 7: TestAdmissionPluginChains
// TestAdmissionPluginChains makes sure that the admission plugin lists are coherent.
// we have to maintain three different lists of plugins: default origin, default kube, default combined
// the union of (default origin and default kube) must equal default combined
// the order of default origin must follow the order of default combined
// the order of default kube must follow the order of default combined
func TestAdmissionPluginChains(t *testing.T) {
	individualSet := sets.NewString(openshiftAdmissionControlPlugins...)
	individualSet.Insert(KubeAdmissionPlugins...)
	combinedSet := sets.NewString(CombinedAdmissionControlPlugins...)
	if !individualSet.Equal(combinedSet) {
		t.Fatalf("individualSets are missing: %v combinedSet is missing: %v", combinedSet.Difference(individualSet), individualSet.Difference(combinedSet))
	}
	lastCurrIndex := -1
	for _, plugin := range openshiftAdmissionControlPlugins {
		for lastCurrIndex = lastCurrIndex + 1; lastCurrIndex < len(CombinedAdmissionControlPlugins); lastCurrIndex++ {
			if CombinedAdmissionControlPlugins[lastCurrIndex] == plugin {
				break
			}
		}
		if lastCurrIndex >= len(CombinedAdmissionControlPlugins) {
			t.Errorf("openshift admission plugins are out of order compared to the combined list. Failed at %v", plugin)
		}
	}
	lastCurrIndex = -1
	for _, plugin := range KubeAdmissionPlugins {
		for lastCurrIndex = lastCurrIndex + 1; lastCurrIndex < len(CombinedAdmissionControlPlugins); lastCurrIndex++ {
			if CombinedAdmissionControlPlugins[lastCurrIndex] == plugin {
				break
			}
		}
		if lastCurrIndex >= len(CombinedAdmissionControlPlugins) {
			t.Errorf("kube admission plugins are out of order compared to the combined list. Failed at %v", plugin)
		}
	}
}
Example 8: buildClientDiagnostics
// buildClientDiagnostics builds client Diagnostic objects based on the rawConfig passed in.
// Returns the Diagnostics built, "ok" bool for whether to proceed or abort, and an error if any was encountered during the building of diagnostics.
func (o DiagnosticsOptions) buildClientDiagnostics(rawConfig *clientcmdapi.Config) ([]types.Diagnostic, bool, error) {
	available := availableClientDiagnostics
	// osClient, kubeClient, clientErr := o.Factory.Clients() // use with a diagnostic that needs OpenShift/Kube client
	_, _, clientErr := o.Factory.Clients()
	if clientErr != nil {
		o.Logger.Notice("CED0001", "Could not configure a client, so client diagnostics are limited to testing configuration and connection")
		available = sets.NewString(clientdiags.ConfigContextsName)
	}
	diagnostics := []types.Diagnostic{}
	requestedDiagnostics := intersection(sets.NewString(o.RequestedDiagnostics...), available).List()
	for _, diagnosticName := range requestedDiagnostics {
		switch diagnosticName {
		case clientdiags.ConfigContextsName:
			seen := map[string]bool{}
			for contextName := range rawConfig.Contexts {
				diagnostic := clientdiags.ConfigContext{RawConfig: rawConfig, ContextName: contextName}
				if clusterUser, defined := diagnostic.ContextClusterUser(); !defined {
					// definitely want to diagnose the broken context
					diagnostics = append(diagnostics, diagnostic)
				} else if !seen[clusterUser] {
					seen[clusterUser] = true // avoid validating same user for multiple projects
					diagnostics = append(diagnostics, diagnostic)
				}
			}
		default:
			return nil, false, fmt.Errorf("unknown diagnostic: %v", diagnosticName)
		}
	}
	return diagnostics, true, clientErr
}
Example 9: doGenCommitters
func (sq *SubmitQueue) doGenCommitters(config *github_util.Config) error {
	pushUsers, pullUsers, err := config.UsersWithAccess()
	if err != nil {
		glog.Fatalf("Unable to read committers from github: %v", err)
	}
	pushSet := sets.NewString()
	for _, user := range pushUsers {
		pushSet.Insert(*user.Login)
	}
	pullSet := sets.NewString()
	for _, user := range pullUsers {
		pullSet.Insert(*user.Login)
	}
	if err = writeWhitelist(sq.Committers, "# auto-generated by "+os.Args[0]+" gen-committers; manual additions should go in the whitelist", pushSet); err != nil {
		glog.Fatalf("Unable to write committers: %v", err)
	}
	glog.Info("Successfully updated committers file.")
	existingWhitelist, err := loadWhitelist(sq.Whitelist)
	if err != nil {
		glog.Fatalf("error loading whitelist; it will not be updated: %v", err)
	}
	neededInWhitelist := existingWhitelist.Union(pullSet)
	neededInWhitelist = neededInWhitelist.Difference(pushSet)
	if err = writeWhitelist(sq.Whitelist, "# auto-generated by "+os.Args[0]+" gen-committers; manual additions may be added by hand", neededInWhitelist); err != nil {
		glog.Fatalf("Unable to write additional user whitelist: %v", err)
	}
	glog.Info("Successfully updated whitelist file.")
	return nil
}
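The whitelist update above is plain set algebra: users who were already whitelisted or who have pull access, minus users who already have push access. A minimal standalone sketch of just that step (the user names are hypothetical, not taken from the example):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/sets"
)

func main() {
	existing := sets.NewString("alice", "bob") // hypothetical: users already on the whitelist
	pull := sets.NewString("bob", "carol")     // hypothetical: users with pull access
	push := sets.NewString("alice")            // hypothetical: users with push access
	needed := existing.Union(pull).Difference(push)
	fmt.Println(needed.List()) // [bob carol]
}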
Example 10: computeUpdatedSCC
// computeUpdatedSCC determines if the expected SCC looks like the actual SCC
// it does this by making the expected SCC mirror the actual SCC for items that
// we are not reconciling and performing a diff (ignoring changes to metadata).
// If a diff is produced then the expected SCC is submitted as needing an update.
func (o *ReconcileSCCOptions) computeUpdatedSCC(expected kapi.SecurityContextConstraints, actual kapi.SecurityContextConstraints) (*kapi.SecurityContextConstraints, bool) {
	needsUpdate := false
	// if unioning old and new groups/users then make the expected contain all
	// also preserve and set priorities
	if o.Union {
		groupSet := sets.NewString(actual.Groups...)
		groupSet.Insert(expected.Groups...)
		expected.Groups = groupSet.List()
		userSet := sets.NewString(actual.Users...)
		userSet.Insert(expected.Users...)
		expected.Users = userSet.List()
		if actual.Priority != nil {
			expected.Priority = actual.Priority
		}
	}
	// sort users and groups to remove any variants in order when diffing
	sort.StringSlice(actual.Groups).Sort()
	sort.StringSlice(actual.Users).Sort()
	sort.StringSlice(expected.Groups).Sort()
	sort.StringSlice(expected.Users).Sort()
	// make a copy of the expected scc here so we can ignore metadata diffs.
	updated := expected
	expected.ObjectMeta = actual.ObjectMeta
	if !kapi.Semantic.DeepEqual(expected, actual) {
		needsUpdate = true
	}
	return &updated, needsUpdate
}
Example 11: TestCacheKeyFields
func TestCacheKeyFields(t *testing.T) {
	keyJSON, err := cacheKey(kapi.NewContext(), &authorizer.DefaultAuthorizationAttributes{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	keyMap := map[string]interface{}{}
	if err := json.Unmarshal([]byte(keyJSON), &keyMap); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	keys := sets.NewString()
	for k := range keyMap {
		keys.Insert(strings.ToLower(k))
	}
	// These are results we don't expect to be in the cache key
	expectedMissingKeys := sets.NewString("requestattributes")
	attrType := reflect.TypeOf((*authorizer.AuthorizationAttributes)(nil)).Elem()
	for i := 0; i < attrType.NumMethod(); i++ {
		name := attrType.Method(i).Name
		name = strings.TrimPrefix(name, "Get")
		name = strings.TrimPrefix(name, "Is")
		name = strings.ToLower(name)
		if !keys.Has(name) && !expectedMissingKeys.Has(name) {
			t.Errorf("computed cache is missing an entry for %s", attrType.Method(i).Name)
		}
	}
}
Example 12: TestInstancesAddedToZones
func TestInstancesAddedToZones(t *testing.T) {
	cm := NewFakeClusterManager(DefaultClusterUID)
	lbc := newLoadBalancerController(t, cm, "")
	zoneToNode := map[string][]string{
		"zone-1": {"n1", "n2"},
		"zone-2": {"n3"},
	}
	addNodes(lbc, zoneToNode)
	// Create 2 igs, one per zone.
	testIG := "test-ig"
	testPort := int64(3001)
	lbc.CloudClusterManager.instancePool.AddInstanceGroup(testIG, testPort)
	// node pool syncs kube-nodes, this will add them to both igs.
	lbc.CloudClusterManager.instancePool.Sync([]string{"n1", "n2", "n3"})
	gotZonesToNode := cm.fakeIGs.GetInstancesByZone()
	i := 0
	for z, nodeNames := range zoneToNode {
		if ig, err := cm.fakeIGs.GetInstanceGroup(testIG, z); err != nil {
			t.Errorf("Failed to find ig %v in zone %v, found %+v: %v", testIG, z, ig, err)
		}
		if cm.fakeIGs.Ports[i] != testPort {
			t.Errorf("Expected the same node port on all igs, got ports %+v", cm.fakeIGs.Ports)
		}
		expNodes := sets.NewString(nodeNames...)
		gotNodes := sets.NewString(gotZonesToNode[z]...)
		if !gotNodes.Equal(expNodes) {
			t.Errorf("Nodes not added to zones, expected %+v got %+v", expNodes, gotNodes)
		}
		i++
	}
}
Example 13: doTestIndex
// Test public interface
func doTestIndex(t *testing.T, indexer Indexer) {
	mkObj := func(id string, val string) testStoreObject {
		return testStoreObject{id: id, val: val}
	}
	// Test Index
	expected := map[string]sets.String{}
	expected["b"] = sets.NewString("a", "c")
	expected["f"] = sets.NewString("e")
	expected["h"] = sets.NewString("g")
	indexer.Add(mkObj("a", "b"))
	indexer.Add(mkObj("c", "b"))
	indexer.Add(mkObj("e", "f"))
	indexer.Add(mkObj("g", "h"))
	{
		for k, v := range expected {
			found := sets.String{}
			indexResults, err := indexer.Index("by_val", mkObj("", k))
			if err != nil {
				t.Errorf("Unexpected error %v", err)
			}
			for _, item := range indexResults {
				found.Insert(item.(testStoreObject).id)
			}
			items := v.List()
			if !found.HasAll(items...) {
				t.Errorf("missing items, index %s, expected %v but found %v", k, items, found.List())
			}
		}
	}
}
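Note that the assertion in doTestIndex uses HasAll, which only checks that found is a superset of the expected IDs; extra items in an index result would not fail the test. A strict comparison would use Equal instead. A small illustration of the difference, as a sketch assuming the same sets package:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/sets"
)

func main() {
	found := sets.NewString("a", "c", "x")
	expected := sets.NewString("a", "c")
	fmt.Println(found.HasAll(expected.List()...)) // true: found contains every expected item
	fmt.Println(found.Equal(expected))            // false: the two sets are not identical
}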
Example 14: TestProxyProtocolEnabled
func TestProxyProtocolEnabled(t *testing.T) {
	policies := sets.NewString(ProxyProtocolPolicyName, "FooBarFoo")
	fakeBackend := &elb.BackendServerDescription{
		InstancePort: aws.Int64(80),
		PolicyNames:  stringSetToPointers(policies),
	}
	result := proxyProtocolEnabled(fakeBackend)
	assert.True(t, result, "expected to find %s in %s", ProxyProtocolPolicyName, policies)
	policies = sets.NewString("FooBarFoo")
	fakeBackend = &elb.BackendServerDescription{
		InstancePort: aws.Int64(80),
		PolicyNames: []*string{
			aws.String("FooBarFoo"),
		},
	}
	result = proxyProtocolEnabled(fakeBackend)
	assert.False(t, result, "did not expect to find %s in %s", ProxyProtocolPolicyName, policies)
	policies = sets.NewString()
	fakeBackend = &elb.BackendServerDescription{
		InstancePort: aws.Int64(80),
	}
	result = proxyProtocolEnabled(fakeBackend)
	assert.False(t, result, "did not expect to find %s in %s", ProxyProtocolPolicyName, policies)
}
Example 15: Ingest
// Ingest method implements extraction.Ingester (necessary for Prometheus library
// to parse the metrics).
func (a *APIResponsiveness) Ingest(samples model.Samples) error {
	ignoredResources := sets.NewString("events")
	ignoredVerbs := sets.NewString("WATCHLIST", "PROXY")
	for _, sample := range samples {
		// Example line:
		// apiserver_request_latencies_summary{resource="namespaces",verb="LIST",quantile="0.99"} 908
		if sample.Metric[model.MetricNameLabel] != "apiserver_request_latencies_summary" {
			continue
		}
		resource := string(sample.Metric["resource"])
		verb := string(sample.Metric["verb"])
		if ignoredResources.Has(resource) || ignoredVerbs.Has(verb) {
			continue
		}
		latency := sample.Value
		quantile, err := strconv.ParseFloat(string(sample.Metric[model.QuantileLabel]), 64)
		if err != nil {
			return err
		}
		a.addMetric(resource, verb, quantile, time.Duration(int64(latency))*time.Microsecond)
	}
	return nil
}