This article collects typical usage examples of the Golang function k8s.io/kubernetes/pkg/apis/autoscaling.Resource. If you are wondering what the Resource function does, how to call it, or where to find real-world uses of it, the curated examples below may help.
In total, 15 code examples of the Resource function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang examples.
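For orientation: the helper being demonstrated simply qualifies a plain resource name with the autoscaling API group. The real definition lives in k8s.io/kubernetes/pkg/apis/autoscaling/register.go and is derived from the package's SchemeGroupVersion; the behavior is roughly equivalent to this minimal sketch (import path depends on the Kubernetes release):
package autoscaling

import "k8s.io/kubernetes/pkg/api/unversioned"

// GroupName is the API group served by this package.
const GroupName = "autoscaling"

// Resource qualifies an unversioned resource name ("horizontalpodautoscalers",
// "deployments/scale", ...) with the autoscaling group, yielding the
// GroupResource values passed to errors.NewNotFound in the examples below.
func Resource(resource string) unversioned.GroupResource {
	return unversioned.GroupResource{Group: GroupName, Resource: resource}
}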
Example 1: Get
func (r *ScaleREST) Get(ctx api.Context, name string) (runtime.Object, error) {
	rc, err := r.registry.GetController(ctx, name)
	if err != nil {
		return nil, errors.NewNotFound(autoscaling.Resource("replicationcontrollers/scale"), name)
	}
	return scaleFromRC(rc), nil
}
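Callers usually distinguish this 404 from other storage failures with the api errors helpers. A hypothetical caller-side check (the storage variable and names are invented for illustration, not part of the example above):
obj, err := scaleStorage.Get(ctx, "frontend")
if err != nil {
	if errors.IsNotFound(err) {
		// the replication controller, and therefore its scale subresource, does not exist
		return
	}
	// some other storage error
	panic(err)
}
scale := obj.(*autoscaling.Scale)
fmt.Printf("current replicas: %d\n", scale.Status.Replicas)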
Example 2: Update
func (r *ScaleREST) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool, error) {
	if obj == nil {
		return nil, false, errors.NewBadRequest("nil update passed to Scale")
	}
	scale, ok := obj.(*autoscaling.Scale)
	if !ok {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("expected input object type to be Scale, but %T", obj))
	}
	if errs := asvalidation.ValidateScale(scale); len(errs) > 0 {
		return nil, false, errors.NewInvalid(autoscaling.Kind("Scale"), scale.Name, errs)
	}
	deployment, err := r.registry.GetDeployment(ctx, scale.Name)
	if err != nil {
		return nil, false, errors.NewNotFound(autoscaling.Resource("deployments/scale"), scale.Name)
	}
	deployment.Spec.Replicas = scale.Spec.Replicas
	deployment.ResourceVersion = scale.ResourceVersion
	deployment, err = r.registry.UpdateDeployment(ctx, deployment)
	if err != nil {
		return nil, false, err
	}
	newScale, err := scaleFromDeployment(deployment)
	if err != nil {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("%v", err))
	}
	return newScale, false, nil
}
Example 3: Update
func (r *ScaleREST) Update(ctx api.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) {
	rc, err := r.registry.GetController(ctx, name)
	if err != nil {
		return nil, false, errors.NewNotFound(autoscaling.Resource("replicationcontrollers/scale"), name)
	}
	oldScale := scaleFromRC(rc)
	obj, err := objInfo.UpdatedObject(ctx, oldScale)
	if err != nil {
		return nil, false, err
	}
	if obj == nil {
		return nil, false, errors.NewBadRequest("nil update passed to Scale")
	}
	scale, ok := obj.(*autoscaling.Scale)
	if !ok {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("wrong object passed to Scale update: %v", obj))
	}
	if errs := validation.ValidateScale(scale); len(errs) > 0 {
		return nil, false, errors.NewInvalid(autoscaling.Kind("Scale"), scale.Name, errs)
	}
	rc.Spec.Replicas = scale.Spec.Replicas
	rc.ResourceVersion = scale.ResourceVersion
	rc, err = r.registry.UpdateController(ctx, rc)
	if err != nil {
		return nil, false, err
	}
	return scaleFromRC(rc), false, nil
}
Example 4: Update
func (r *ScaleREST) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool, error) {
	if obj == nil {
		return nil, false, errors.NewBadRequest("nil update passed to Scale")
	}
	scale, ok := obj.(*autoscaling.Scale)
	if !ok {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("wrong object passed to Scale update: %v", obj))
	}
	if errs := asvalidation.ValidateScale(scale); len(errs) > 0 {
		return nil, false, errors.NewInvalid(autoscaling.Kind("Scale"), scale.Name, errs)
	}
	rs, err := r.registry.GetReplicaSet(ctx, scale.Name)
	if err != nil {
		return nil, false, errors.NewNotFound(autoscaling.Resource("replicasets/scale"), scale.Name)
	}
	rs.Spec.Replicas = scale.Spec.Replicas
	rs.ResourceVersion = scale.ResourceVersion
	rs, err = r.registry.UpdateReplicaSet(ctx, rs)
	if err != nil {
		return nil, false, err
	}
	newScale, err := scaleFromReplicaSet(rs)
	if err != nil {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("%v", err))
	}
	return newScale, false, nil
}
Example 5: Get
// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name.
func (s horizontalPodAutoscalerNamespaceLister) Get(name string) (*autoscaling.HorizontalPodAutoscaler, error) {
	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(autoscaling.Resource("horizontalpodautoscaler"), name)
	}
	return obj.(*autoscaling.HorizontalPodAutoscaler), nil
}
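The unexported namespace lister above is normally reached through the generated HorizontalPodAutoscalerLister interface. A hedged usage sketch, assuming the usual generated listers package and an informer whose cache has already synced (variable names invented for illustration):
lister := listers.NewHorizontalPodAutoscalerLister(informer.GetIndexer())
hpa, err := lister.HorizontalPodAutoscalers("production").Get("frontend-hpa")
if errors.IsNotFound(err) {
	// not in the local cache; the error carries the group/resource built by
	// autoscaling.Resource("horizontalpodautoscaler") as shown above
}
_ = hpa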
Example 6: Get
func (r *ScaleREST) Get(ctx api.Context, name string) (runtime.Object, error) {
	deployment, err := r.registry.GetDeployment(ctx, name)
	if err != nil {
		return nil, errors.NewNotFound(autoscaling.Resource("deployments/scale"), name)
	}
	scale, err := scaleFromDeployment(deployment)
	if err != nil {
		return nil, errors.NewBadRequest(fmt.Sprintf("%v", err))
	}
	return scale, nil
}
Example 7: Get
func (r *ScaleREST) Get(ctx api.Context, name string) (runtime.Object, error) {
	rs, err := r.registry.GetReplicaSet(ctx, name)
	if err != nil {
		return nil, errors.NewNotFound(autoscaling.Resource("replicasets/scale"), name)
	}
	scale, err := scaleFromReplicaSet(rs)
	if err != nil {
		return nil, errors.NewBadRequest(fmt.Sprintf("%v", err))
	}
	return scale, err
}
Example 8: NewREST
// NewREST returns a RESTStorage object that will work against horizontal pod autoscalers.
func NewREST(opts generic.RESTOptions) (*REST, *StatusREST) {
	prefix := "/" + opts.ResourcePrefix
	newListFunc := func() runtime.Object { return &autoscaling.HorizontalPodAutoscalerList{} }
	storageInterface, dFunc := opts.Decorator(
		opts.StorageConfig,
		cachesize.GetWatchCacheSizeByResource(cachesize.HorizontalPodAutoscalers),
		&autoscaling.HorizontalPodAutoscaler{},
		prefix,
		horizontalpodautoscaler.Strategy,
		newListFunc,
		horizontalpodautoscaler.GetAttrs,
		storage.NoTriggerPublisher,
	)
	store := &genericregistry.Store{
		NewFunc: func() runtime.Object { return &autoscaling.HorizontalPodAutoscaler{} },
		// NewListFunc returns an object capable of storing results of an etcd list.
		NewListFunc: newListFunc,
		// Produces a path that etcd understands, to the root of the resource
		// by combining the namespace in the context with the given prefix
		KeyRootFunc: func(ctx api.Context) string {
			return genericregistry.NamespaceKeyRootFunc(ctx, prefix)
		},
		// Produces a path that etcd understands, to the resource by combining
		// the namespace in the context with the given prefix
		KeyFunc: func(ctx api.Context, name string) (string, error) {
			return genericregistry.NamespaceKeyFunc(ctx, prefix, name)
		},
		// Retrieve the name field of an autoscaler
		ObjectNameFunc: func(obj runtime.Object) (string, error) {
			return obj.(*autoscaling.HorizontalPodAutoscaler).Name, nil
		},
		// Used to match objects based on labels/fields for list
		PredicateFunc:           horizontalpodautoscaler.MatchAutoscaler,
		QualifiedResource:       autoscaling.Resource("horizontalpodautoscalers"),
		EnableGarbageCollection: opts.EnableGarbageCollection,
		DeleteCollectionWorkers: opts.DeleteCollectionWorkers,
		// Used to validate autoscaler creation
		CreateStrategy: horizontalpodautoscaler.Strategy,
		// Used to validate autoscaler updates
		UpdateStrategy: horizontalpodautoscaler.Strategy,
		DeleteStrategy: horizontalpodautoscaler.Strategy,
		Storage:        storageInterface,
		DestroyFunc:    dFunc,
	}
	statusStore := *store
	statusStore.UpdateStrategy = horizontalpodautoscaler.StatusStrategy
	return &REST{store}, &StatusREST{store: &statusStore}
}
Example 9: NewREST
// NewREST returns a RESTStorage object that will work against horizontal pod autoscalers.
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) {
	store := &genericregistry.Store{
		NewFunc:     func() runtime.Object { return &autoscaling.HorizontalPodAutoscaler{} },
		NewListFunc: func() runtime.Object { return &autoscaling.HorizontalPodAutoscalerList{} },
		ObjectNameFunc: func(obj runtime.Object) (string, error) {
			return obj.(*autoscaling.HorizontalPodAutoscaler).Name, nil
		},
		PredicateFunc:     horizontalpodautoscaler.MatchAutoscaler,
		QualifiedResource: autoscaling.Resource("horizontalpodautoscalers"),
		CreateStrategy:    horizontalpodautoscaler.Strategy,
		UpdateStrategy:    horizontalpodautoscaler.Strategy,
		DeleteStrategy:    horizontalpodautoscaler.Strategy,
	}
	options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: horizontalpodautoscaler.GetAttrs}
	if err := store.CompleteWithOptions(options); err != nil {
		panic(err) // TODO: Propagate error up
	}
	statusStore := *store
	statusStore.UpdateStrategy = horizontalpodautoscaler.StatusStrategy
	return &REST{store}, &StatusREST{store: &statusStore}
}
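For completeness, the REST and StatusREST types returned by the two NewREST variants above are thin wrappers around the generic store in the upstream registry packages; roughly the following sketch, with details varying by release:
// REST implements a RESTStorage for horizontal pod autoscalers.
type REST struct {
	*genericregistry.Store
}

// StatusREST serves the /status subresource; it reuses the same store but
// with the status-only update strategy installed above.
type StatusREST struct {
	store *genericregistry.Store
}

// New returns an empty HorizontalPodAutoscaler for the status endpoint.
func (r *StatusREST) New() runtime.Object {
	return &autoscaling.HorizontalPodAutoscaler{}
}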
Example 10: v1Storage
func (p RESTStorageProvider) v1Storage(apiResourceConfigSource genericapiserver.APIResourceConfigSource, restOptionsGetter genericapiserver.RESTOptionsGetter) map[string]rest.Storage {
	version := autoscalingapiv1.SchemeGroupVersion
	storage := map[string]rest.Storage{}
	if apiResourceConfigSource.ResourceEnabled(version.WithResource("horizontalpodautoscalers")) {
		hpaStorage, hpaStatusStorage := horizontalpodautoscaleretcd.NewREST(restOptionsGetter(autoscaling.Resource("horizontalpodautoscalers")))
		storage["horizontalpodautoscalers"] = hpaStorage
		storage["horizontalpodautoscalers/status"] = hpaStatusStorage
	}
	return storage
}
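In the same storage-provider file, v1Storage is usually paired with a GroupName method so the generic API server can key this storage map by API group; a small sketch under that assumption:
// GroupName tells the API server which group this provider's storage serves.
func (p RESTStorageProvider) GroupName() string {
	return autoscaling.GroupName
}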
Example 11: BuildKubernetesMasterConfig
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client, informers shared.InformerFactory, admissionControl admission.Interface, originAuthenticator authenticator.Request) (*MasterConfig, error) {
	if options.KubernetesMasterConfig == nil {
		return nil, errors.New("insufficient information to build KubernetesMasterConfig")
	}
	kubeletClientConfig := configapi.GetKubeletClientConfig(options)
	kubeletClient, err := kubeletclient.NewStaticKubeletClient(kubeletClientConfig)
	if err != nil {
		return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
	}
	// in-order list of plug-ins that should intercept admission decisions
	// TODO: Push node environment support to upstream in future
	_, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, err
	}
	port, err := strconv.Atoi(portString)
	if err != nil {
		return nil, err
	}
	portRange, err := knet.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
	if err != nil {
		return nil, err
	}
	podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
	if err != nil {
		return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
	}
	// Defaults are tested in TestAPIServerDefaults
	server := apiserveroptions.NewAPIServer()
	// Adjust defaults
	server.EventTTL = 2 * time.Hour
	server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
	server.ServiceNodePortRange = *portRange
	server.EnableLogsSupport = false // don't expose server logs
	server.EnableProfiling = false
	server.APIPrefix = KubeAPIPrefix
	server.APIGroupPrefix = KubeAPIGroupPrefix
	server.SecurePort = port
	server.MasterCount = options.KubernetesMasterConfig.MasterCount
	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}
	// Defaults are tested in TestCMServerDefaults
	cmserver := cmapp.NewCMServer()
	// Adjust defaults
	cmserver.Address = "" // no healthz endpoint
	cmserver.Port = 0     // no healthz endpoint
	cmserver.PodEvictionTimeout = unversioned.Duration{Duration: podEvictionTimeout}
	cmserver.VolumeConfiguration.EnableDynamicProvisioning = options.VolumeConfig.DynamicProvisioningEnabled
	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}
	cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
	if err != nil {
		return nil, err
	}
	if cloud != nil {
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", server.CloudProvider, server.CloudConfigFile)
	}
	var proxyClientCerts []tls.Certificate
	if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 {
		clientCert, err := tls.LoadX509KeyPair(
			options.KubernetesMasterConfig.ProxyClientInfo.CertFile,
			options.KubernetesMasterConfig.ProxyClientInfo.KeyFile,
		)
		if err != nil {
			return nil, err
		}
		proxyClientCerts = append(proxyClientCerts, clientCert)
	}
	resourceEncodingConfig := genericapiserver.NewDefaultResourceEncodingConfig()
	resourceEncodingConfig.SetVersionEncoding(
		kapi.GroupName,
		unversioned.GroupVersion{Group: kapi.GroupName, Version: options.EtcdStorageConfig.KubernetesStorageVersion},
		kapi.SchemeGroupVersion,
	)
	resourceEncodingConfig.SetVersionEncoding(
		extensions.GroupName,
		unversioned.GroupVersion{Group: extensions.GroupName, Version: "v1beta1"},
		extensions.SchemeGroupVersion,
	)
//......... part of the code omitted .........
Example 12: BuildDefaultAPIServer
// BuildDefaultAPIServer constructs the appropriate APIServer and StorageFactory for the kubernetes server.
// It returns an error if no KubernetesMasterConfig was defined.
func BuildDefaultAPIServer(options configapi.MasterConfig) (*apiserveroptions.APIServer, genericapiserver.StorageFactory, error) {
	if options.KubernetesMasterConfig == nil {
		return nil, nil, fmt.Errorf("no kubernetesMasterConfig defined, unable to load settings")
	}
	_, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, nil, err
	}
	port, err := strconv.Atoi(portString)
	if err != nil {
		return nil, nil, err
	}
	portRange, err := knet.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
	if err != nil {
		return nil, nil, err
	}
	// Defaults are tested in TestAPIServerDefaults
	server := apiserveroptions.NewAPIServer()
	// Adjust defaults
	server.EventTTL = 2 * time.Hour
	server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
	server.ServiceNodePortRange = *portRange
	server.EnableLogsSupport = false // don't expose server logs
	server.EnableProfiling = false
	server.APIPrefix = KubeAPIPrefix
	server.APIGroupPrefix = KubeAPIGroupPrefix
	server.SecurePort = port
	server.MasterCount = options.KubernetesMasterConfig.MasterCount
	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
		return nil, nil, kerrors.NewAggregate(err)
	}
	resourceEncodingConfig := genericapiserver.NewDefaultResourceEncodingConfig()
	resourceEncodingConfig.SetVersionEncoding(
		kapi.GroupName,
		unversioned.GroupVersion{Group: kapi.GroupName, Version: options.EtcdStorageConfig.KubernetesStorageVersion},
		kapi.SchemeGroupVersion,
	)
	resourceEncodingConfig.SetVersionEncoding(
		extensions.GroupName,
		unversioned.GroupVersion{Group: extensions.GroupName, Version: "v1beta1"},
		extensions.SchemeGroupVersion,
	)
	resourceEncodingConfig.SetVersionEncoding(
		batch.GroupName,
		unversioned.GroupVersion{Group: batch.GroupName, Version: "v1"},
		batch.SchemeGroupVersion,
	)
	resourceEncodingConfig.SetVersionEncoding(
		autoscaling.GroupName,
		unversioned.GroupVersion{Group: autoscaling.GroupName, Version: "v1"},
		autoscaling.SchemeGroupVersion,
	)
	storageGroupsToEncodingVersion, err := server.StorageGroupsToEncodingVersion()
	if err != nil {
		return nil, nil, err
	}
	// use the stock storage config based on args, but override bits from our config where appropriate
	etcdConfig := server.StorageConfig
	etcdConfig.Prefix = options.EtcdStorageConfig.KubernetesStoragePrefix
	etcdConfig.ServerList = options.EtcdClientInfo.URLs
	etcdConfig.KeyFile = options.EtcdClientInfo.ClientCert.KeyFile
	etcdConfig.CertFile = options.EtcdClientInfo.ClientCert.CertFile
	etcdConfig.CAFile = options.EtcdClientInfo.CA
	storageFactory, err := genericapiserver.BuildDefaultStorageFactory(
		etcdConfig,
		server.DefaultStorageMediaType,
		kapi.Codecs,
		genericapiserver.NewDefaultResourceEncodingConfig(),
		storageGroupsToEncodingVersion,
		// FIXME: this GroupVersionResource override should be configurable
		[]unversioned.GroupVersionResource{batch.Resource("scheduledjobs").WithVersion("v2alpha1")},
		master.DefaultAPIResourceConfigSource(), server.RuntimeConfig,
	)
	if err != nil {
		return nil, nil, err
	}
	/*storageFactory := genericapiserver.NewDefaultStorageFactory(
		etcdConfig,
		server.DefaultStorageMediaType,
		kapi.Codecs,
		resourceEncodingConfig,
		master.DefaultAPIResourceConfigSource(),
	)*/
	// the order here is important, it defines which version will be used for storage
//......... part of the code omitted .........
Example 13: buildAutoscalingResources
func buildAutoscalingResources(apiResourceConfigSource genericapiserver.APIResourceConfigSource, restOptionsGetter RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) {
	apiGroupInfo := NewDefaultAPIGroupInfo(autoscaling.GroupName)
	storageForVersion := func(version unversioned.GroupVersion) map[string]rest.Storage {
		storage := map[string]rest.Storage{}
		if apiResourceConfigSource.ResourceEnabled(version.WithResource("horizontalpodautoscalers")) {
			hpaStorage, hpaStatusStorage := horizontalpodautoscaleretcd.NewREST(restOptionsGetter(autoscaling.Resource("horizontalpodautoscalers")))
			storage["horizontalpodautoscalers"] = hpaStorage
			storage["horizontalpodautoscalers/status"] = hpaStatusStorage
		}
		return storage
	}
	if apiResourceConfigSource.AnyResourcesForVersionEnabled(autoscalingapiv1.SchemeGroupVersion) {
		apiGroupInfo.VersionedResourcesStorageMap[autoscalingapiv1.SchemeGroupVersion.Version] = storageForVersion(autoscalingapiv1.SchemeGroupVersion)
		apiGroupInfo.GroupMeta.GroupVersion = autoscalingapiv1.SchemeGroupVersion
	}
	return apiGroupInfo, true
}
Example 14: Run
// Run runs the specified APIServer. This should never exit.
func Run(s *options.APIServer) error {
	genericapiserver.DefaultAndValidateRunOptions(s.ServerRunOptions)
	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: s.AllowPrivileged,
		// TODO(vmarmol): Implement support for HostNetworkSources.
		PrivilegedSources: capabilities.PrivilegedSources{
			HostNetworkSources: []string{},
			HostPIDSources:     []string{},
			HostIPCSources:     []string{},
		},
		PerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec,
	})
	// Setup tunneler if needed
	var tunneler genericapiserver.Tunneler
	var proxyDialerFn apiserver.ProxyDialerFunc
	if len(s.SSHUser) > 0 {
		// Get ssh key distribution func, if supported
		var installSSH genericapiserver.InstallSSHKey
		cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
		if err != nil {
			glog.Fatalf("Cloud provider could not be initialized: %v", err)
		}
		if cloud != nil {
			if instances, supported := cloud.Instances(); supported {
				installSSH = instances.AddSSHKeyToAllInstances
			}
		}
		if s.KubeletConfig.Port == 0 {
			glog.Fatalf("Must enable kubelet port if proxy ssh-tunneling is specified.")
		}
		// Set up the tunneler
		// TODO(cjcullen): If we want this to handle per-kubelet ports or other
		// kubelet listen-addresses, we need to plumb through options.
		healthCheckPath := &url.URL{
			Scheme: "https",
			Host:   net.JoinHostPort("127.0.0.1", strconv.FormatUint(uint64(s.KubeletConfig.Port), 10)),
			Path:   "healthz",
		}
		tunneler = genericapiserver.NewSSHTunneler(s.SSHUser, s.SSHKeyfile, healthCheckPath, installSSH)
		// Use the tunneler's dialer to connect to the kubelet
		s.KubeletConfig.Dial = tunneler.Dial
		// Use the tunneler's dialer when proxying to pods, services, and nodes
		proxyDialerFn = tunneler.Dial
	}
	// Proxying to pods and services is IP-based... don't expect to be able to verify the hostname
	proxyTLSClientConfig := &tls.Config{InsecureSkipVerify: true}
	kubeletClient, err := kubeletclient.NewStaticKubeletClient(&s.KubeletConfig)
	if err != nil {
		glog.Fatalf("Failure to start kubelet client: %v", err)
	}
	storageGroupsToEncodingVersion, err := s.StorageGroupsToEncodingVersion()
	if err != nil {
		glog.Fatalf("error generating storage version map: %s", err)
	}
	storageFactory, err := genericapiserver.BuildDefaultStorageFactory(
		s.StorageConfig, s.DefaultStorageMediaType, api.Codecs,
		genericapiserver.NewDefaultResourceEncodingConfig(), storageGroupsToEncodingVersion,
		master.DefaultAPIResourceConfigSource(), s.RuntimeConfig)
	if err != nil {
		glog.Fatalf("error in initializing storage factory: %s", err)
	}
	storageFactory.AddCohabitatingResources(batch.Resource("jobs"), extensions.Resource("jobs"))
	storageFactory.AddCohabitatingResources(autoscaling.Resource("horizontalpodautoscalers"), extensions.Resource("horizontalpodautoscalers"))
	for _, override := range s.EtcdServersOverrides {
		tokens := strings.Split(override, "#")
		if len(tokens) != 2 {
			glog.Errorf("invalid value of etcd server overrides: %s", override)
			continue
		}
		apiresource := strings.Split(tokens[0], "/")
		if len(apiresource) != 2 {
			glog.Errorf("invalid resource definition: %s", tokens[0])
			continue
		}
		group := apiresource[0]
		resource := apiresource[1]
		groupResource := unversioned.GroupResource{Group: group, Resource: resource}
		servers := strings.Split(tokens[1], ";")
		storageFactory.SetEtcdLocation(groupResource, servers)
	}
	// Default to the private server key for service account token signing
	if s.ServiceAccountKeyFile == "" && s.TLSPrivateKeyFile != "" {
		if authenticator.IsValidServiceAccountKeyFile(s.TLSPrivateKeyFile) {
			s.ServiceAccountKeyFile = s.TLSPrivateKeyFile
		} else {
			glog.Warning("No RSA key provided, service account token authentication disabled")
		}
	}
	var serviceAccountGetter serviceaccount.ServiceAccountTokenGetter
//......... part of the code omitted .........
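The AddCohabitatingResources calls in the example above operate purely on GroupResource values: autoscaling.Resource("horizontalpodautoscalers") and extensions.Resource("horizontalpodautoscalers") differ only in their Group field, and registering them as cohabitating makes the storage factory keep both API groups' HorizontalPodAutoscalers in a single etcd location. A standalone restatement of the values involved (a sketch; struct contents shown in comments for clarity only):
hpaInAutoscaling := autoscaling.Resource("horizontalpodautoscalers") // Group: "autoscaling", Resource: "horizontalpodautoscalers"
hpaInExtensions := extensions.Resource("horizontalpodautoscalers")   // Group: "extensions", Resource: "horizontalpodautoscalers"
storageFactory.AddCohabitatingResources(hpaInAutoscaling, hpaInExtensions)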
Example 15: Run
//......... part of the code omitted .........
	}
	// Proxying to pods and services is IP-based... don't expect to be able to verify the hostname
	proxyTLSClientConfig := &tls.Config{InsecureSkipVerify: true}
	if s.Etcd.StorageConfig.DeserializationCacheSize == 0 {
		// When size of cache is not explicitly set, estimate its size based on
		// target memory usage.
		glog.V(2).Infof("Initializing deserialization cache size based on %dMB limit", s.GenericServerRunOptions.TargetRAMMB)
		// This is the heuristics that from memory capacity is trying to infer
		// the maximum number of nodes in the cluster and set cache sizes based
		// on that value.
		// From our documentation, we officially recommend 120GB machines for
		// 2000 nodes, and we scale from that point. Thus we assume ~60MB of
		// capacity per node.
		// TODO: We may consider deciding that some percentage of memory will
		// be used for the deserialization cache and divide it by the max object
		// size to compute its size. We may even go further and measure
		// collective sizes of the objects in the cache.
		clusterSize := s.GenericServerRunOptions.TargetRAMMB / 60
		s.Etcd.StorageConfig.DeserializationCacheSize = 25 * clusterSize
		if s.Etcd.StorageConfig.DeserializationCacheSize < 1000 {
			s.Etcd.StorageConfig.DeserializationCacheSize = 1000
		}
	}
	storageGroupsToEncodingVersion, err := s.GenericServerRunOptions.StorageGroupsToEncodingVersion()
	if err != nil {
		return fmt.Errorf("error generating storage version map: %s", err)
	}
	storageFactory, err := kubeapiserver.BuildDefaultStorageFactory(
		s.Etcd.StorageConfig, s.GenericServerRunOptions.DefaultStorageMediaType, api.Codecs,
		genericapiserver.NewDefaultResourceEncodingConfig(), storageGroupsToEncodingVersion,
		// FIXME: this GroupVersionResource override should be configurable
		[]schema.GroupVersionResource{batch.Resource("cronjobs").WithVersion("v2alpha1")},
		master.DefaultAPIResourceConfigSource(), s.GenericServerRunOptions.RuntimeConfig)
	if err != nil {
		return fmt.Errorf("error in initializing storage factory: %s", err)
	}
	storageFactory.AddCohabitatingResources(autoscaling.Resource("horizontalpodautoscalers"), extensions.Resource("horizontalpodautoscalers"))
	for _, override := range s.Etcd.EtcdServersOverrides {
		tokens := strings.Split(override, "#")
		if len(tokens) != 2 {
			glog.Errorf("invalid value of etcd server overrides: %s", override)
			continue
		}
		apiresource := strings.Split(tokens[0], "/")
		if len(apiresource) != 2 {
			glog.Errorf("invalid resource definition: %s", tokens[0])
			continue
		}
		group := apiresource[0]
		resource := apiresource[1]
		groupResource := schema.GroupResource{Group: group, Resource: resource}
		servers := strings.Split(tokens[1], ";")
		storageFactory.SetEtcdLocation(groupResource, servers)
	}
	// Default to the private server key for service account token signing
	if len(s.Authentication.ServiceAccounts.KeyFiles) == 0 && s.SecureServing.ServerCert.CertKey.KeyFile != "" {
		if kubeauthenticator.IsValidServiceAccountKeyFile(s.SecureServing.ServerCert.CertKey.KeyFile) {
			s.Authentication.ServiceAccounts.KeyFiles = []string{s.SecureServing.ServerCert.CertKey.KeyFile}
		} else {