This page collects typical usage examples of the Go function Resource from the package k8s.io/kubernetes/pkg/apis/batch. If you have been wondering what exactly Resource does, how to call it, or where to find examples of it in use, the hand-picked code examples below should help.
Fifteen code examples of the Resource function are shown below, sorted by popularity by default.
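For context before the examples: batch.Resource is conventionally a one-line helper defined in the group's register.go. A minimal sketch of its usual shape, offered as an assumption based on the unversioned API types of this Kubernetes era, not a verbatim copy of the source:

// Resource qualifies a plain resource name with the batch group, so that
// Resource("jobs") yields unversioned.GroupResource{Group: "batch", Resource: "jobs"}.
func Resource(resource string) unversioned.GroupResource {
    return SchemeGroupVersion.WithResource(resource).GroupResource()
}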
Example 1: getBatchResources
// getBatchResources returns the resources for the batch API
func (m *Master) getBatchResources(c *Config, version unversioned.GroupVersion) map[string]rest.Storage {
    storage := map[string]rest.Storage{}
    if c.APIResourceConfigSource.ResourceEnabled(version.WithResource("jobs")) {
        jobsStorage, jobsStatusStorage := jobetcd.NewREST(m.GetRESTOptionsOrDie(c, batch.Resource("jobs")))
        storage["jobs"] = jobsStorage
        storage["jobs/status"] = jobsStatusStorage
    }
    return storage
}
Example 2: v2alpha1Storage
func (p RESTStorageProvider) v2alpha1Storage(apiResourceConfigSource genericapiserver.APIResourceConfigSource, restOptionsGetter genericapiserver.RESTOptionsGetter) map[string]rest.Storage {
    version := batchapiv2alpha1.SchemeGroupVersion

    storage := map[string]rest.Storage{}
    if apiResourceConfigSource.ResourceEnabled(version.WithResource("jobs")) {
        jobsStorage, jobsStatusStorage := jobetcd.NewREST(restOptionsGetter(batch.Resource("jobs")))
        storage["jobs"] = jobsStorage
        storage["jobs/status"] = jobsStatusStorage
    }
    if apiResourceConfigSource.ResourceEnabled(version.WithResource("cronjobs")) {
        cronJobsStorage, cronJobsStatusStorage := cronjobetcd.NewREST(restOptionsGetter(batch.Resource("cronjobs")))
        storage["cronjobs"] = cronJobsStorage
        storage["cronjobs/status"] = cronJobsStatusStorage
        storage["scheduledjobs"] = cronJobsStorage
        storage["scheduledjobs/status"] = cronJobsStatusStorage
    }
    return storage
}
Example 3: NewREST
// NewREST returns a RESTStorage object that will work against Jobs.
func NewREST(opts generic.RESTOptions) (*REST, *StatusREST) {
    prefix := "/" + opts.ResourcePrefix
    newListFunc := func() runtime.Object { return &batch.JobList{} }
    storageInterface, dFunc := opts.Decorator(
        opts.StorageConfig,
        cachesize.GetWatchCacheSizeByResource(cachesize.Jobs),
        &batch.Job{},
        prefix,
        job.Strategy,
        newListFunc,
        job.GetAttrs,
        storage.NoTriggerPublisher,
    )
    store := &genericregistry.Store{
        NewFunc: func() runtime.Object { return &batch.Job{} },
        // NewListFunc returns an object capable of storing results of an etcd list.
        NewListFunc: newListFunc,
        // Produces a path that etcd understands, to the root of the resource
        // by combining the namespace in the context with the given prefix
        KeyRootFunc: func(ctx api.Context) string {
            return genericregistry.NamespaceKeyRootFunc(ctx, prefix)
        },
        // Produces a path that etcd understands, to the resource by combining
        // the namespace in the context with the given prefix
        KeyFunc: func(ctx api.Context, name string) (string, error) {
            return genericregistry.NamespaceKeyFunc(ctx, prefix, name)
        },
        // Retrieve the name field of a job
        ObjectNameFunc: func(obj runtime.Object) (string, error) {
            return obj.(*batch.Job).Name, nil
        },
        // Used to match objects based on labels/fields for list and watch
        PredicateFunc:           job.MatchJob,
        QualifiedResource:       batch.Resource("jobs"),
        EnableGarbageCollection: opts.EnableGarbageCollection,
        DeleteCollectionWorkers: opts.DeleteCollectionWorkers,
        // Used to validate job creation
        CreateStrategy: job.Strategy,
        // Used to validate job updates
        UpdateStrategy: job.Strategy,
        DeleteStrategy: job.Strategy,
        Storage:        storageInterface,
        DestroyFunc:    dFunc,
    }
    statusStore := *store
    statusStore.UpdateStrategy = job.StatusStrategy
    return &REST{store}, &StatusREST{store: &statusStore}
}
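The REST and StatusREST types returned above are not part of this excerpt; they are conventionally thin wrappers around the generic store. A sketch of their usual shape under that assumption, not a verbatim copy of the source:

// REST serves Jobs through the embedded generic registry store.
type REST struct {
    *genericregistry.Store
}

// StatusREST serves the jobs/status subresource; its copy of the store swaps
// in the status update strategy assigned above.
type StatusREST struct {
    store *genericregistry.Store
}

// New returns an empty Job into which status updates are decoded.
func (r *StatusREST) New() runtime.Object {
    return &batch.Job{}
}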
Example 4: Get
// Get retrieves the Job from the indexer for a given namespace and name.
func (s jobNamespaceLister) Get(name string) (*v1.Job, error) {
    obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
    if err != nil {
        return nil, err
    }
    if !exists {
        return nil, errors.NewNotFound(batch.Resource("job"), name)
    }
    return obj.(*v1.Job), nil
}
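Callers usually distinguish the not-found case with the matching helper from the same errors package. A minimal usage sketch; the jobLister variable and the namespace/name values are hypothetical:

job, err := jobLister.Jobs("default").Get("my-job")
if errors.IsNotFound(err) {
    // No such Job; the error carries the batch/"job" GroupResource set above.
    return nil
}
if err != nil {
    return err
}
fmt.Printf("found job %s\n", job.Name)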
Example 5: getBatchResources
// getBatchResources returns the resources for the batch API
func (m *Master) getBatchResources(c *Config) map[string]rest.Storage {
    // TODO update when we support more than one version of this group
    version := batchapiv1.SchemeGroupVersion

    storage := map[string]rest.Storage{}
    if c.APIResourceConfigSource.ResourceEnabled(version.WithResource("jobs")) {
        jobsStorage, jobsStatusStorage := jobetcd.NewREST(m.GetRESTOptionsOrDie(c, batch.Resource("jobs")))
        storage["jobs"] = jobsStorage
        storage["jobs/status"] = jobsStatusStorage
    }
    return storage
}
Example 6: NewREST
// NewREST returns a RESTStorage object that will work against ScheduledJobs.
func NewREST(opts generic.RESTOptions) (*REST, *StatusREST) {
    prefix := "/scheduledjobs"
    newListFunc := func() runtime.Object { return &batch.ScheduledJobList{} }
    storageInterface := opts.Decorator(
        opts.Storage, cachesize.GetWatchCacheSizeByResource(cachesize.ScheduledJobs), &batch.ScheduledJob{}, prefix, scheduledjob.Strategy, newListFunc)
    store := &registry.Store{
        NewFunc: func() runtime.Object { return &batch.ScheduledJob{} },
        // NewListFunc returns an object capable of storing results of an etcd list.
        NewListFunc: newListFunc,
        // Produces a path that etcd understands, to the root of the resource
        // by combining the namespace in the context with the given prefix
        KeyRootFunc: func(ctx api.Context) string {
            return registry.NamespaceKeyRootFunc(ctx, prefix)
        },
        // Produces a path that etcd understands, to the resource by combining
        // the namespace in the context with the given prefix
        KeyFunc: func(ctx api.Context, name string) (string, error) {
            return registry.NamespaceKeyFunc(ctx, prefix, name)
        },
        // Retrieve the name field of a scheduled job
        ObjectNameFunc: func(obj runtime.Object) (string, error) {
            return obj.(*batch.ScheduledJob).Name, nil
        },
        // Used to match objects based on labels/fields for list and watch
        PredicateFunc: func(label labels.Selector, field fields.Selector) generic.Matcher {
            return scheduledjob.MatchScheduledJob(label, field)
        },
        QualifiedResource:       batch.Resource("scheduledjobs"),
        DeleteCollectionWorkers: opts.DeleteCollectionWorkers,
        // Used to validate scheduled job creation
        CreateStrategy: scheduledjob.Strategy,
        // Used to validate scheduled job updates
        UpdateStrategy: scheduledjob.Strategy,
        DeleteStrategy: scheduledjob.Strategy,
        Storage:        storageInterface,
    }
    statusStore := *store
    statusStore.UpdateStrategy = scheduledjob.StatusStrategy
    return &REST{store}, &StatusREST{store: &statusStore}
}
Example 7: ForResource
// ForResource gives generic access to a shared informer of the matching type
// TODO extend this to unknown resources with a client pool
func (f *sharedInformerFactory) ForResource(resource unversioned.GroupResource) (GenericInformer, error) {
    switch resource {
    case api.Resource("pods"):
        return &genericInformer{resource: resource, informer: f.Pods().Informer()}, nil
    case api.Resource("limitranges"):
        return &genericInformer{resource: resource, informer: f.LimitRanges().Informer()}, nil
    case api.Resource("namespaces"):
        return &genericInformer{resource: resource, informer: f.Namespaces().Informer()}, nil
    case api.Resource("nodes"):
        return &genericInformer{resource: resource, informer: f.Nodes().Informer()}, nil
    case api.Resource("persistentvolumeclaims"):
        return &genericInformer{resource: resource, informer: f.PersistentVolumeClaims().Informer()}, nil
    case api.Resource("persistentvolumes"):
        return &genericInformer{resource: resource, informer: f.PersistentVolumes().Informer()}, nil
    case api.Resource("serviceaccounts"):
        return &genericInformer{resource: resource, informer: f.ServiceAccounts().Informer()}, nil
    case extensions.Resource("daemonsets"):
        return &genericInformer{resource: resource, informer: f.DaemonSets().Informer()}, nil
    case extensions.Resource("deployments"):
        return &genericInformer{resource: resource, informer: f.Deployments().Informer()}, nil
    case extensions.Resource("replicasets"):
        return &genericInformer{resource: resource, informer: f.ReplicaSets().Informer()}, nil
    case rbac.Resource("clusterrolebindings"):
        return &genericInformer{resource: resource, informer: f.ClusterRoleBindings().Informer()}, nil
    case rbac.Resource("clusterroles"):
        return &genericInformer{resource: resource, informer: f.ClusterRoles().Informer()}, nil
    case rbac.Resource("rolebindings"):
        return &genericInformer{resource: resource, informer: f.RoleBindings().Informer()}, nil
    case rbac.Resource("roles"):
        return &genericInformer{resource: resource, informer: f.Roles().Informer()}, nil
    case batch.Resource("jobs"):
        return &genericInformer{resource: resource, informer: f.Jobs().Informer()}, nil
    }

    return nil, fmt.Errorf("no informer found for %v", resource)
}
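A consumer resolves an informer generically through the factory by its GroupResource. A short usage sketch, assuming a constructed factory f and a stop channel stopCh:

informer, err := f.ForResource(batch.Resource("jobs"))
if err != nil {
    return err // the resource is not one of the cases handled above
}
go informer.Informer().Run(stopCh)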
Example 8: buildBatchResources
func buildBatchResources(apiResourceConfigSource genericapiserver.APIResourceConfigSource, restOptionsGetter RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) {
    apiGroupInfo := NewDefaultAPIGroupInfo(batch.GroupName)

    storageForVersion := func(version unversioned.GroupVersion) map[string]rest.Storage {
        storage := map[string]rest.Storage{}
        if apiResourceConfigSource.ResourceEnabled(version.WithResource("jobs")) {
            jobsStorage, jobsStatusStorage := jobetcd.NewREST(restOptionsGetter(batch.Resource("jobs")))
            storage["jobs"] = jobsStorage
            storage["jobs/status"] = jobsStatusStorage
        }
        return storage
    }

    if apiResourceConfigSource.AnyResourcesForVersionEnabled(batchapiv2alpha1.SchemeGroupVersion) {
        apiGroupInfo.VersionedResourcesStorageMap[batchapiv2alpha1.SchemeGroupVersion.Version] = storageForVersion(batchapiv2alpha1.SchemeGroupVersion)
        apiGroupInfo.GroupMeta.GroupVersion = batchapiv2alpha1.SchemeGroupVersion
    }
    if apiResourceConfigSource.AnyResourcesForVersionEnabled(batchapiv1.SchemeGroupVersion) {
        apiGroupInfo.VersionedResourcesStorageMap[batchapiv1.SchemeGroupVersion.Version] = storageForVersion(batchapiv1.SchemeGroupVersion)
        apiGroupInfo.GroupMeta.GroupVersion = batchapiv1.SchemeGroupVersion
    }
    return apiGroupInfo, true
}
Example 9: NewREST
// NewREST returns a RESTStorage object that will work against CronJobs.
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) {
    store := &genericregistry.Store{
        NewFunc:     func() runtime.Object { return &batch.CronJob{} },
        NewListFunc: func() runtime.Object { return &batch.CronJobList{} },
        ObjectNameFunc: func(obj runtime.Object) (string, error) {
            return obj.(*batch.CronJob).Name, nil
        },
        PredicateFunc:     cronjob.MatchCronJob,
        QualifiedResource: batch.Resource("cronjobs"),

        CreateStrategy: cronjob.Strategy,
        UpdateStrategy: cronjob.Strategy,
        DeleteStrategy: cronjob.Strategy,
    }
    options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: cronjob.GetAttrs}
    if err := store.CompleteWithOptions(options); err != nil {
        panic(err) // TODO: Propagate error up
    }

    statusStore := *store
    statusStore.UpdateStrategy = cronjob.StatusStrategy
    return &REST{store}, &StatusREST{store: &statusStore}
}
Example 10: Run
// Run runs the specified APIServer. This should never exit.
func Run(s *options.ServerRunOptions) error {
    genericvalidation.VerifyEtcdServersList(s.GenericServerRunOptions)
    genericapiserver.DefaultAndValidateRunOptions(s.GenericServerRunOptions)
    genericConfig := genericapiserver.NewConfig(). // create the new config
        ApplyOptions(s.GenericServerRunOptions). // apply the options selected
        Complete() // set default values based on the known values
    serviceIPRange, apiServerServiceIP, err := genericapiserver.DefaultServiceIPRange(s.GenericServerRunOptions.ServiceClusterIPRange)
    if err != nil {
        glog.Fatalf("Error determining service IP ranges: %v", err)
    }
    if err := genericConfig.MaybeGenerateServingCerts(apiServerServiceIP); err != nil {
        glog.Fatalf("Failed to generate service certificate: %v", err)
    }
    capabilities.Initialize(capabilities.Capabilities{
        AllowPrivileged: s.AllowPrivileged,
        // TODO(vmarmol): Implement support for HostNetworkSources.
        PrivilegedSources: capabilities.PrivilegedSources{
            HostNetworkSources: []string{},
            HostPIDSources:     []string{},
            HostIPCSources:     []string{},
        },
        PerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec,
    })
    // Setup tunneler if needed
    var tunneler genericapiserver.Tunneler
    var proxyDialerFn apiserver.ProxyDialerFunc
    if len(s.SSHUser) > 0 {
        // Get ssh key distribution func, if supported
        var installSSH genericapiserver.InstallSSHKey
        cloud, err := cloudprovider.InitCloudProvider(s.GenericServerRunOptions.CloudProvider, s.GenericServerRunOptions.CloudConfigFile)
        if err != nil {
            glog.Fatalf("Cloud provider could not be initialized: %v", err)
        }
        if cloud != nil {
            if instances, supported := cloud.Instances(); supported {
                installSSH = instances.AddSSHKeyToAllInstances
            }
        }
        if s.KubeletConfig.Port == 0 {
            glog.Fatalf("Must enable kubelet port if proxy ssh-tunneling is specified.")
        }
        // Set up the tunneler
        // TODO(cjcullen): If we want this to handle per-kubelet ports or other
        // kubelet listen-addresses, we need to plumb through options.
        healthCheckPath := &url.URL{
            Scheme: "https",
            Host:   net.JoinHostPort("127.0.0.1", strconv.FormatUint(uint64(s.KubeletConfig.Port), 10)),
            Path:   "healthz",
        }
        tunneler = genericapiserver.NewSSHTunneler(s.SSHUser, s.SSHKeyfile, healthCheckPath, installSSH)
        // Use the tunneler's dialer to connect to the kubelet
        s.KubeletConfig.Dial = tunneler.Dial
        // Use the tunneler's dialer when proxying to pods, services, and nodes
        proxyDialerFn = tunneler.Dial
    }
    // Proxying to pods and services is IP-based... don't expect to be able to verify the hostname
    proxyTLSClientConfig := &tls.Config{InsecureSkipVerify: true}
    if s.GenericServerRunOptions.StorageConfig.DeserializationCacheSize == 0 {
        // When size of cache is not explicitly set, estimate its size based on
        // target memory usage.
        glog.V(2).Infof("Initializing deserialization cache size based on %dMB limit", s.GenericServerRunOptions.TargetRAMMB)
        // This heuristic infers the maximum number of nodes in the cluster
        // from memory capacity and sets cache sizes based on that value.
        // Our documentation officially recommends 120GB machines for
        // 2000 nodes, and we scale from that point. Thus we assume ~60MB of
        // capacity per node.
        // TODO: We may consider deciding that some percentage of memory will
        // be used for the deserialization cache and divide it by the max object
        // size to compute its size. We may even go further and measure
        // collective sizes of the objects in the cache.
        clusterSize := s.GenericServerRunOptions.TargetRAMMB / 60
        s.GenericServerRunOptions.StorageConfig.DeserializationCacheSize = 25 * clusterSize
        if s.GenericServerRunOptions.StorageConfig.DeserializationCacheSize < 1000 {
            s.GenericServerRunOptions.StorageConfig.DeserializationCacheSize = 1000
        }
    }
    storageGroupsToEncodingVersion, err := s.GenericServerRunOptions.StorageGroupsToEncodingVersion()
    if err != nil {
        glog.Fatalf("error generating storage version map: %s", err)
    }
    storageFactory, err := genericapiserver.BuildDefaultStorageFactory(
        s.GenericServerRunOptions.StorageConfig, s.GenericServerRunOptions.DefaultStorageMediaType, api.Codecs,
        genericapiserver.NewDefaultResourceEncodingConfig(), storageGroupsToEncodingVersion,
        // FIXME: this GroupVersionResource override should be configurable
        []schema.GroupVersionResource{batch.Resource("cronjobs").WithVersion("v2alpha1")},
        master.DefaultAPIResourceConfigSource(), s.GenericServerRunOptions.RuntimeConfig)
    if err != nil {
        glog.Fatalf("error in initializing storage factory: %s", err)
    }
    storageFactory.AddCohabitatingResources(batch.Resource("jobs"), extensions.Resource("jobs"))
//......... part of the code omitted here .........
Example 11: BuildKubernetesMasterConfig
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client, informers shared.InformerFactory, admissionControl admission.Interface, originAuthenticator authenticator.Request) (*MasterConfig, error) {
    if options.KubernetesMasterConfig == nil {
        return nil, errors.New("insufficient information to build KubernetesMasterConfig")
    }
    kubeletClientConfig := configapi.GetKubeletClientConfig(options)
    kubeletClient, err := kubeletclient.NewStaticKubeletClient(kubeletClientConfig)
    if err != nil {
        return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
    }
    // in-order list of plug-ins that should intercept admission decisions
    // TODO: Push node environment support to upstream in future
    _, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
    if err != nil {
        return nil, err
    }
    port, err := strconv.Atoi(portString)
    if err != nil {
        return nil, err
    }
    portRange, err := knet.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
    if err != nil {
        return nil, err
    }
    podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
    if err != nil {
        return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
    }
    // Defaults are tested in TestAPIServerDefaults
    server := apiserveroptions.NewAPIServer()
    // Adjust defaults
    server.EventTTL = 2 * time.Hour
    server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
    server.ServiceNodePortRange = *portRange
    server.EnableLogsSupport = false // don't expose server logs
    server.EnableProfiling = false
    server.APIPrefix = KubeAPIPrefix
    server.APIGroupPrefix = KubeAPIGroupPrefix
    server.SecurePort = port
    server.MasterCount = options.KubernetesMasterConfig.MasterCount
    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide
    // proper errors
    if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }
    // Defaults are tested in TestCMServerDefaults
    cmserver := cmapp.NewCMServer()
    // Adjust defaults
    cmserver.Address = "" // no healthz endpoint
    cmserver.Port = 0     // no healthz endpoint
    cmserver.PodEvictionTimeout = unversioned.Duration{Duration: podEvictionTimeout}
    cmserver.VolumeConfiguration.EnableDynamicProvisioning = options.VolumeConfig.DynamicProvisioningEnabled
    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide
    // proper errors
    if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }
    cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
    if err != nil {
        return nil, err
    }
    if cloud != nil {
        glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", cmserver.CloudProvider, cmserver.CloudConfigFile)
    }
    var proxyClientCerts []tls.Certificate
    if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 {
        clientCert, err := tls.LoadX509KeyPair(
            options.KubernetesMasterConfig.ProxyClientInfo.CertFile,
            options.KubernetesMasterConfig.ProxyClientInfo.KeyFile,
        )
        if err != nil {
            return nil, err
        }
        proxyClientCerts = append(proxyClientCerts, clientCert)
    }
    resourceEncodingConfig := genericapiserver.NewDefaultResourceEncodingConfig()
    resourceEncodingConfig.SetVersionEncoding(
        kapi.GroupName,
        unversioned.GroupVersion{Group: kapi.GroupName, Version: options.EtcdStorageConfig.KubernetesStorageVersion},
        kapi.SchemeGroupVersion,
    )
    resourceEncodingConfig.SetVersionEncoding(
        extensions.GroupName,
        unversioned.GroupVersion{Group: extensions.GroupName, Version: "v1beta1"},
        extensions.SchemeGroupVersion,
    )
//......... part of the code omitted here .........
Example 12: shouldCheckResource
type podNodeConstraints struct {
    *admission.Handler
    selectorLabelBlacklist sets.String
    config                 *api.PodNodeConstraintsConfig
    authorizer             authorizer.Authorizer
}

// resourcesToCheck is a map of resources and corresponding kinds of things that
// we want handled in this plugin
// TODO: Include a function that will extract the PodSpec from the resource for
// each type added here.
var resourcesToCheck = map[unversioned.GroupResource]unversioned.GroupKind{
    kapi.Resource("pods"):                   kapi.Kind("Pod"),
    kapi.Resource("podtemplates"):           kapi.Kind("PodTemplate"),
    kapi.Resource("replicationcontrollers"): kapi.Kind("ReplicationController"),
    batch.Resource("jobs"):                  batch.Kind("Job"),
    extensions.Resource("deployments"):      extensions.Kind("Deployment"),
    extensions.Resource("replicasets"):      extensions.Kind("ReplicaSet"),
    extensions.Resource("jobs"):             extensions.Kind("Job"),
    deployapi.Resource("deploymentconfigs"): deployapi.Kind("DeploymentConfig"),
}

// resourcesToIgnore is a list of resource kinds that contain a PodSpec that
// we choose not to handle in this plugin
var resourcesToIgnore = []unversioned.GroupKind{
    extensions.Kind("DaemonSet"),
}

func shouldCheckResource(resource unversioned.GroupResource, kind unversioned.GroupKind) (bool, error) {
    expectedKind, shouldCheck := resourcesToCheck[resource]
    if !shouldCheck {
//......... part of the code omitted here .........
Example 13: TestJobScale
func TestJobScale(t *testing.T) {
    fakeClientset := fake.NewSimpleClientset(job())
    scaler := JobScaler{fakeClientset.Batch()}
    preconditions := ScalePrecondition{-1, ""}
    count := uint(3)
    name := "foo"
    scaler.Scale("default", name, count, &preconditions, nil, nil)

    actions := fakeClientset.Actions()
    if len(actions) != 2 {
        t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions)
    }
    if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != batch.Resource("jobs") || action.GetName() != name {
        t.Errorf("unexpected action: %v, expected get-job %s", actions[0], name)
    }
    if action, ok := actions[1].(testcore.UpdateAction); !ok || action.GetResource().GroupResource() != batch.Resource("jobs") || *action.GetObject().(*batch.Job).Spec.Parallelism != int32(count) {
        t.Errorf("unexpected action %v, expected update-job with parallelism = %d", actions[1], count)
    }
}
Example 14: TestPodNodeConstraintsResources
func TestPodNodeConstraintsResources(t *testing.T) {
    ns := kapi.NamespaceDefault
    testconfigs := []struct {
        config         *api.PodNodeConstraintsConfig
        userinfo       user.Info
        reviewResponse *authorizationapi.SubjectAccessReviewResponse
    }{
        {
            config:         testConfig(),
            userinfo:       serviceaccount.UserInfo("", "", ""),
            reviewResponse: reviewResponse(false, ""),
        },
    }
    testresources := []struct {
        resource      func(bool) runtime.Object
        kind          unversioned.GroupKind
        groupresource unversioned.GroupResource
        prefix        string
    }{
        {
            resource:      replicationController,
            kind:          kapi.Kind("ReplicationController"),
            groupresource: kapi.Resource("replicationcontrollers"),
            prefix:        "ReplicationController",
        },
        {
            resource:      deployment,
            kind:          extensions.Kind("Deployment"),
            groupresource: extensions.Resource("deployments"),
            prefix:        "Deployment",
        },
        {
            resource:      replicaSet,
            kind:          extensions.Kind("ReplicaSet"),
            groupresource: extensions.Resource("replicasets"),
            prefix:        "ReplicaSet",
        },
        {
            resource:      job,
            kind:          extensions.Kind("Job"),
            groupresource: extensions.Resource("jobs"),
            prefix:        "Job",
        },
        {
            resource:      job,
            kind:          batch.Kind("Job"),
            groupresource: batch.Resource("jobs"),
            prefix:        "Job",
        },
        {
            resource:      deploymentConfig,
            kind:          deployapi.Kind("DeploymentConfig"),
            groupresource: deployapi.Resource("deploymentconfigs"),
            prefix:        "DeploymentConfig",
        },
        {
            resource:      podTemplate,
            kind:          deployapi.Kind("PodTemplate"),
            groupresource: deployapi.Resource("podtemplates"),
            prefix:        "PodTemplate",
        },
        {
            resource:      podSecurityPolicySubjectReview,
            kind:          securityapi.Kind("PodSecurityPolicySubjectReview"),
            groupresource: securityapi.Resource("podsecuritypolicysubjectreviews"),
            prefix:        "PodSecurityPolicy",
        },
        {
            resource:      podSecurityPolicySelfSubjectReview,
            kind:          securityapi.Kind("PodSecurityPolicySelfSubjectReview"),
            groupresource: securityapi.Resource("podsecuritypolicyselfsubjectreviews"),
            prefix:        "PodSecurityPolicy",
        },
        {
            resource:      podSecurityPolicyReview,
            kind:          securityapi.Kind("PodSecurityPolicyReview"),
            groupresource: securityapi.Resource("podsecuritypolicyreviews"),
            prefix:        "PodSecurityPolicy",
        },
    }
    testparams := []struct {
        nodeselector     bool
        expectedErrorMsg string
        prefix           string
    }{
        {
            nodeselector:     true,
            expectedErrorMsg: "node selection by label(s) [bogus] is prohibited by policy for your role",
            prefix:           "with nodeSelector",
        },
        {
            nodeselector:     false,
            expectedErrorMsg: "",
            prefix:           "without nodeSelector",
        },
    }
    testops := []struct {
        operation admission.Operation
    }{
        {
//......... part of the code omitted here .........
Example 15: Run
//......... part of the code omitted here .........
    }
    // Proxying to pods and services is IP-based... don't expect to be able to verify the hostname
    proxyTLSClientConfig := &tls.Config{InsecureSkipVerify: true}
    if s.Etcd.StorageConfig.DeserializationCacheSize == 0 {
        // When size of cache is not explicitly set, estimate its size based on
        // target memory usage.
        glog.V(2).Infof("Initializing deserialization cache size based on %dMB limit", s.GenericServerRunOptions.TargetRAMMB)
        // This heuristic infers the maximum number of nodes in the cluster
        // from memory capacity and sets cache sizes based on that value.
        // Our documentation officially recommends 120GB machines for
        // 2000 nodes, and we scale from that point. Thus we assume ~60MB of
        // capacity per node.
        // TODO: We may consider deciding that some percentage of memory will
        // be used for the deserialization cache and divide it by the max object
        // size to compute its size. We may even go further and measure
        // collective sizes of the objects in the cache.
        clusterSize := s.GenericServerRunOptions.TargetRAMMB / 60
        s.Etcd.StorageConfig.DeserializationCacheSize = 25 * clusterSize
        if s.Etcd.StorageConfig.DeserializationCacheSize < 1000 {
            s.Etcd.StorageConfig.DeserializationCacheSize = 1000
        }
    }
    storageGroupsToEncodingVersion, err := s.GenericServerRunOptions.StorageGroupsToEncodingVersion()
    if err != nil {
        return fmt.Errorf("error generating storage version map: %s", err)
    }
    storageFactory, err := kubeapiserver.BuildDefaultStorageFactory(
        s.Etcd.StorageConfig, s.GenericServerRunOptions.DefaultStorageMediaType, api.Codecs,
        genericapiserver.NewDefaultResourceEncodingConfig(), storageGroupsToEncodingVersion,
        // FIXME: this GroupVersionResource override should be configurable
        []schema.GroupVersionResource{batch.Resource("cronjobs").WithVersion("v2alpha1")},
        master.DefaultAPIResourceConfigSource(), s.GenericServerRunOptions.RuntimeConfig)
    if err != nil {
        return fmt.Errorf("error in initializing storage factory: %s", err)
    }
    storageFactory.AddCohabitatingResources(autoscaling.Resource("horizontalpodautoscalers"), extensions.Resource("horizontalpodautoscalers"))
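    // Each override entry takes the form "group/resource#serverList", with the
    // servers separated by ";", e.g. "batch/jobs#http://127.0.0.1:2379"
    // (hypothetical endpoint). The loop below parses exactly that shape.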
    for _, override := range s.Etcd.EtcdServersOverrides {
        tokens := strings.Split(override, "#")
        if len(tokens) != 2 {
            glog.Errorf("invalid value of etcd server overrides: %s", override)
            continue
        }
        apiresource := strings.Split(tokens[0], "/")
        if len(apiresource) != 2 {
            glog.Errorf("invalid resource definition: %s", tokens[0])
            continue
        }
        group := apiresource[0]
        resource := apiresource[1]
        groupResource := schema.GroupResource{Group: group, Resource: resource}
        servers := strings.Split(tokens[1], ";")
        storageFactory.SetEtcdLocation(groupResource, servers)
    }
    // Default to the private server key for service account token signing
    if len(s.Authentication.ServiceAccounts.KeyFiles) == 0 && s.SecureServing.ServerCert.CertKey.KeyFile != "" {
        if kubeauthenticator.IsValidServiceAccountKeyFile(s.SecureServing.ServerCert.CertKey.KeyFile) {
            s.Authentication.ServiceAccounts.KeyFiles = []string{s.SecureServing.ServerCert.CertKey.KeyFile}
        } else {