This page collects typical usage examples of the Golang function github.com/openshift/origin/pkg/cmd/util.VersionedPrintObject. If you are wondering what VersionedPrintObject does, how to call it, or what it looks like in real code, the curated examples below should help.
Fourteen code examples of the VersionedPrintObject function are shown below, sorted by popularity by default.
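Before the individual examples, here is a minimal sketch of the call pattern they all share: build a print closure with cmdutil.VersionedPrintObject, then hand it a kapi.List of the objects to print. This sketch is not taken from origin itself; the helper name printChangedObjects is invented for illustration, the import paths mirror the vendoring visible in the examples and may differ between origin releases, and it assumes the four-argument form (print function, command, RESTMapper, writer) that most examples use; older revisions (Examples 1 and 14) omit the mapper argument.
package example

import (
	"io"

	"github.com/spf13/cobra"

	kapi "k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/runtime"

	cmdutil "github.com/openshift/origin/pkg/cmd/util"
	"github.com/openshift/origin/pkg/cmd/util/clientcmd"
)

// printChangedObjects is a hypothetical helper: it builds a versioned print
// function and prints the given items as a single kapi.List, mirroring the
// reconcile-*, registry, and router examples below.
func printChangedObjects(f *clientcmd.Factory, c *cobra.Command, out io.Writer, items []runtime.Object) error {
	// The RESTMapper lets the printer resolve each object's group/version/kind
	// (assumed call; some origin revisions use f.Object() with no argument).
	mapper, _ := f.Object(false)

	// VersionedPrintObject wraps f.PrintObject so that objects are converted to
	// the requested output version (json, yaml, go-template, ...) before printing.
	printFn := cmdutil.VersionedPrintObject(f.PrintObject, c, mapper, out)

	// The returned closure accepts a single runtime.Object; the examples pass a kapi.List.
	list := &kapi.List{Items: items}
	return printFn(list)
}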
Example 1: RunReconcileClusterRoles
// RunReconcileClusterRoles contains all the necessary functionality for the OpenShift cli reconcile-cluster-roles command
func (o *ReconcileClusterRolesOptions) RunReconcileClusterRoles(cmd *cobra.Command, f *clientcmd.Factory) error {
	changedClusterRoles, err := o.ChangedClusterRoles()
	if err != nil {
		return err
	}
	if len(changedClusterRoles) == 0 {
		return nil
	}
	if (len(o.Output) != 0) && !o.Confirmed {
		list := &kapi.List{}
		for _, item := range changedClusterRoles {
			list.Items = append(list.Items, item)
		}
		fn := cmdutil.VersionedPrintObject(f.PrintObject, cmd, o.Out)
		if err := fn(list); err != nil {
			return err
		}
	}
	if o.Confirmed {
		return o.ReplaceChangedRoles(changedClusterRoles)
	}
	return nil
}
Example 2: Complete
func (o *AppJSONOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, args []string) error {
	version, _ := cmd.Flags().GetString("output-version")
	for _, v := range strings.Split(version, ",") {
		gv, err := unversioned.ParseGroupVersion(v)
		if err != nil {
			return fmt.Errorf("provided output-version %q is not valid: %v", v, err)
		}
		o.OutputVersions = append(o.OutputVersions, gv)
	}
	o.OutputVersions = append(o.OutputVersions, registered.EnabledVersions()...)
	o.Action.Bulk.Mapper = clientcmd.ResourceMapper(f)
	o.Action.Bulk.Op = configcmd.Create
	mapper, _ := f.Object(false)
	o.PrintObject = cmdutil.VersionedPrintObject(f.PrintObject, cmd, mapper, o.Action.Out)
	o.Generator, _ = cmd.Flags().GetString("generator")
	ns, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}
	o.Namespace = ns
	o.Client, _, err = f.Clients()
	return err
}
Example 3: RunReconcileClusterRoles
// RunReconcileClusterRoles contains all the necessary functionality for the OpenShift cli reconcile-cluster-roles command
func (o *ReconcileClusterRolesOptions) RunReconcileClusterRoles(cmd *cobra.Command, f *clientcmd.Factory) error {
	changedClusterRoles, skippedClusterRoles, err := o.ChangedClusterRoles()
	if err != nil {
		return err
	}
	if len(skippedClusterRoles) > 0 {
		fmt.Fprintf(o.ErrOut, "Skipped reconciling roles with the annotation %s=true\n", ReconcileProtectAnnotation)
		for _, role := range skippedClusterRoles {
			fmt.Fprintf(o.ErrOut, "skipped: clusterrole/%s\n", role.Name)
		}
	}
	if len(changedClusterRoles) == 0 {
		return nil
	}
	if (len(o.Output) != 0) && !o.Confirmed {
		list := &kapi.List{}
		for _, item := range changedClusterRoles {
			list.Items = append(list.Items, item)
		}
		mapper, _ := f.Object(false)
		fn := cmdutil.VersionedPrintObject(f.PrintObject, cmd, mapper, o.Out)
		if err := fn(list); err != nil {
			return err
		}
	}
	if o.Confirmed {
		return o.ReplaceChangedRoles(changedClusterRoles)
	}
	return nil
}
Example 4: RunReconcileSCCs
// RunReconcileSCCs contains the functionality for the reconcile-sccs command for making or
// previewing changes.
func (o *ReconcileSCCOptions) RunReconcileSCCs(cmd *cobra.Command, f *clientcmd.Factory) error {
	// get sccs that need updated
	changedSCCs, err := o.ChangedSCCs()
	if err != nil {
		return err
	}
	if len(changedSCCs) == 0 {
		return nil
	}
	if !o.Confirmed {
		list := &kapi.List{}
		for _, item := range changedSCCs {
			list.Items = append(list.Items, item)
		}
		mapper, _ := f.Object(false)
		fn := cmdutil.VersionedPrintObject(f.PrintObject, cmd, mapper, o.Out)
		if err := fn(list); err != nil {
			return err
		}
	}
	if o.Confirmed {
		return o.ReplaceChangedSCCs(changedSCCs)
	}
	return nil
}
Example 5: Complete
// Complete sets any default behavior for the command
func (o *NewAppOptions) Complete(commandName string, f *clientcmd.Factory, c *cobra.Command, args []string, out io.Writer) error {
	o.Out = out
	o.ErrOut = c.Out()
	o.Output = kcmdutil.GetFlagString(c, "output")
	// Only output="" should print descriptions of intermediate steps. Everything
	// else should print only some specific output (json, yaml, go-template, ...)
	if len(o.Output) == 0 {
		o.Config.Out = o.Out
	} else {
		o.Config.Out = ioutil.Discard
	}
	o.Config.ErrOut = o.ErrOut
	o.Action.Out, o.Action.ErrOut = o.Out, o.ErrOut
	o.Action.Bulk.Mapper = clientcmd.ResourceMapper(f)
	o.Action.Bulk.Op = configcmd.Create
	// Retry is used to support previous versions of the API server that will
	// consider the presence of an unknown trigger type to be an error.
	o.Action.Bulk.Retry = retryBuildConfig
	o.Config.DryRun = o.Action.DryRun
	o.CommandPath = c.CommandPath()
	o.CommandName = commandName
	mapper, _ := f.Object(false)
	o.PrintObject = cmdutil.VersionedPrintObject(f.PrintObject, c, mapper, out)
	o.LogsForObject = f.LogsForObject
	if err := CompleteAppConfig(o.Config, f, c, args); err != nil {
		return err
	}
	if err := setAppConfigLabels(c, o.Config); err != nil {
		return err
	}
	return nil
}
Example 6: Complete
// Complete sets any default behavior for the command
func (o *NewAppOptions) Complete(commandName string, f *clientcmd.Factory, c *cobra.Command, args []string, out io.Writer) error {
	o.Out = out
	o.ErrOut = c.Out()
	o.Output = kcmdutil.GetFlagString(c, "output")
	// Only output="" should print descriptions of intermediate steps. Everything
	// else should print only some specific output (json, yaml, go-template, ...)
	if len(o.Output) == 0 {
		o.Config.Out = o.Out
	} else {
		o.Config.Out = ioutil.Discard
	}
	o.Config.ErrOut = o.ErrOut
	o.CommandPath = c.CommandPath()
	o.CommandName = commandName
	o.PrintObject = cmdutil.VersionedPrintObject(f.PrintObject, c, out)
	o.LogsForObject = f.LogsForObject
	if err := CompleteAppConfig(o.Config, f, c, args); err != nil {
		return err
	}
	if err := setAppConfigLabels(c, o.Config); err != nil {
		return err
	}
	return nil
}
Example 7: RunReconcileClusterRoleBindings
func (o *ReconcileClusterRoleBindingsOptions) RunReconcileClusterRoleBindings(cmd *cobra.Command, f *clientcmd.Factory) error {
	changedClusterRoleBindings, fetchErr := o.ChangedClusterRoleBindings()
	if fetchErr != nil && !IsClusterRoleBindingLookupError(fetchErr) {
		// we got an error that isn't due to a partial match, so we can't continue
		return fetchErr
	}
	if len(changedClusterRoleBindings) == 0 {
		return fetchErr
	}
	if (len(o.Output) != 0) && !o.Confirmed {
		list := &kapi.List{}
		for _, item := range changedClusterRoleBindings {
			list.Items = append(list.Items, item)
		}
		mapper, _ := f.Object(false)
		fn := cmdutil.VersionedPrintObject(f.PrintObject, cmd, mapper, o.Out)
		if err := fn(list); err != nil {
			return kutilerrors.NewAggregate([]error{fetchErr, err})
		}
	}
	if o.Confirmed {
		if err := o.ReplaceChangedRoleBindings(changedClusterRoleBindings); err != nil {
			return kutilerrors.NewAggregate([]error{fetchErr, err})
		}
	}
	return fetchErr
}
Example 8: Run
// Run runs the ipfailover command.
func Run(f *clientcmd.Factory, options *ipfailover.IPFailoverConfigCmdOptions, cmd *cobra.Command, args []string) error {
	name, err := getConfigurationName(args)
	if err != nil {
		return err
	}
	if len(options.ServiceAccount) == 0 {
		return fmt.Errorf("you must specify a service account for the ipfailover pod with --service-account, it cannot be blank")
	}
	options.Action.Bulk.Mapper = clientcmd.ResourceMapper(f)
	options.Action.Bulk.Op = configcmd.Create
	if err := ipfailover.ValidateCmdOptions(options); err != nil {
		return err
	}
	p, err := getPlugin(name, f, options)
	if err != nil {
		return err
	}
	list, err := p.Generate()
	if err != nil {
		return err
	}
	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}
	_, kClient, _, err := f.Clients()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}
	if err := validateServiceAccount(kClient, namespace, options.ServiceAccount); err != nil {
		return fmt.Errorf("ipfailover could not be created; %v", err)
	}
	configList := []runtime.Object{
		&kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: options.ServiceAccount}},
	}
	list.Items = append(configList, list.Items...)
	if options.Action.ShouldPrint() {
		mapper, _ := f.Object(false)
		return cmdutil.VersionedPrintObject(f.PrintObject, cmd, mapper, options.Action.Out)(list)
	}
	if errs := options.Action.WithMessage(fmt.Sprintf("Creating IP failover %s", name), "created").Run(list, namespace); len(errs) > 0 {
		return cmdutil.ErrExit
	}
	return nil
}
Example 9: Complete
// Complete sets any default behavior for the command
func (o *NewBuildOptions) Complete(baseName, commandName string, f *clientcmd.Factory, c *cobra.Command, args []string, out, errout io.Writer, in io.Reader) error {
	o.In = in
	o.Out = out
	o.ErrOut = errout
	o.Output = kcmdutil.GetFlagString(c, "output")
	// Only output="" should print descriptions of intermediate steps. Everything
	// else should print only some specific output (json, yaml, go-template, ...)
	o.Config.In = in
	if len(o.Output) == 0 {
		o.Config.Out = o.Out
	} else {
		o.Config.Out = ioutil.Discard
	}
	o.Config.ErrOut = o.ErrOut
	o.Action.Out, o.Action.ErrOut = o.Out, o.ErrOut
	o.Action.Bulk.Mapper = clientcmd.ResourceMapper(f)
	o.Action.Bulk.Op = configcmd.Create
	// Retry is used to support previous versions of the API server that will
	// consider the presence of an unknown trigger type to be an error.
	o.Action.Bulk.Retry = retryBuildConfig
	o.Config.DryRun = o.Action.DryRun
	o.Config.AllowNonNumericExposedPorts = true
	o.BaseName = baseName
	o.CommandPath = c.CommandPath()
	o.CommandName = commandName
	cmdutil.WarnAboutCommaSeparation(o.ErrOut, o.Config.Environment, "--env")
	mapper, _ := f.Object(false)
	o.PrintObject = cmdutil.VersionedPrintObject(f.PrintObject, c, mapper, out)
	o.LogsForObject = f.LogsForObject
	if err := CompleteAppConfig(o.Config, f, c, args); err != nil {
		return err
	}
	if o.Config.Dockerfile == "-" {
		data, err := ioutil.ReadAll(in)
		if err != nil {
			return err
		}
		o.Config.Dockerfile = string(data)
	}
	if err := setAppConfigLabels(c, o.Config); err != nil {
		return err
	}
	return nil
}
Example 10: Run
// Run runs the ipfailover command.
func Run(f *clientcmd.Factory, options *ipfailover.IPFailoverConfigCmdOptions, cmd *cobra.Command, args []string) error {
	name, err := getConfigurationName(args)
	if err != nil {
		return err
	}
	options.Action.Bulk.Mapper = clientcmd.ResourceMapper(f)
	options.Action.Bulk.Op = configcmd.Create
	if err := ipfailover.ValidateCmdOptions(options); err != nil {
		return err
	}
	p, err := getPlugin(name, f, options)
	if err != nil {
		return err
	}
	list, err := p.Generate()
	if err != nil {
		return err
	}
	if options.Action.ShouldPrint() {
		mapper, _ := f.Object(false)
		return cmdutil.VersionedPrintObject(f.PrintObject, cmd, mapper, options.Action.Out)(list)
	}
	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}
	if errs := options.Action.WithMessage(fmt.Sprintf("Creating IP failover %s", name), "created").Run(list, namespace); len(errs) > 0 {
		return cmdutil.ErrExit
	}
	return nil
}
Example 11: RunCmdRegistry
//......... the rest of the code is omitted here .........
				},
			},
			Volumes: append(volumes, kapi.Volume{
				Name: "registry-storage",
				VolumeSource: kapi.VolumeSource{},
			}),
			ServiceAccountName: opts.Config.ServiceAccount,
		},
	}
	if mountHost {
		podTemplate.Spec.Volumes[len(podTemplate.Spec.Volumes)-1].HostPath = &kapi.HostPathVolumeSource{Path: opts.Config.HostMount}
	} else {
		podTemplate.Spec.Volumes[len(podTemplate.Spec.Volumes)-1].EmptyDir = &kapi.EmptyDirVolumeSource{}
	}
	objects := []runtime.Object{}
	for _, s := range secrets {
		objects = append(objects, s)
	}
	if needServiceAccountRole {
		objects = append(objects,
			&kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: opts.Config.ServiceAccount}},
			&authapi.ClusterRoleBinding{
				ObjectMeta: kapi.ObjectMeta{Name: fmt.Sprintf("registry-%s-role", opts.Config.Name)},
				Subjects: []kapi.ObjectReference{
					{
						Kind: "ServiceAccount",
						Name: opts.Config.ServiceAccount,
						Namespace: opts.namespace,
					},
				},
				RoleRef: kapi.ObjectReference{
					Kind: "ClusterRole",
					Name: "system:registry",
				},
			},
		)
	}
	if opts.Config.DaemonSet {
		objects = append(objects, &extensions.DaemonSet{
			ObjectMeta: kapi.ObjectMeta{
				Name: name,
				Labels: opts.label,
			},
			Spec: extensions.DaemonSetSpec{
				Template: kapi.PodTemplateSpec{
					ObjectMeta: podTemplate.ObjectMeta,
					Spec: podTemplate.Spec,
				},
			},
		})
	} else {
		objects = append(objects, &deployapi.DeploymentConfig{
			ObjectMeta: kapi.ObjectMeta{
				Name: name,
				Labels: opts.label,
			},
			Spec: deployapi.DeploymentConfigSpec{
				Replicas: opts.Config.Replicas,
				Selector: opts.label,
				Triggers: []deployapi.DeploymentTriggerPolicy{
					{Type: deployapi.DeploymentTriggerOnConfigChange},
				},
				Template: podTemplate,
			},
		})
	}
	objects = app.AddServices(objects, true)
	// Set registry service's sessionAffinity to ClientIP to prevent push
	// failures due to a use of poorly consistent storage shared by
	// multiple replicas. Also reuse the cluster IP if provided to avoid
	// changing the internal value.
	for _, obj := range objects {
		switch t := obj.(type) {
		case *kapi.Service:
			t.Spec.SessionAffinity = kapi.ServiceAffinityClientIP
			t.Spec.ClusterIP = clusterIP
		}
	}
	// TODO: label all created objects with the same label
	list := &kapi.List{Items: objects}
	if opts.Config.Action.ShouldPrint() {
		mapper, _ := opts.factory.Object(false)
		fn := cmdutil.VersionedPrintObject(opts.factory.PrintObject, opts.cmd, mapper, opts.out)
		if err := fn(list); err != nil {
			return fmt.Errorf("unable to print object: %v", err)
		}
		return nil
	}
	if errs := opts.Config.Action.WithMessage(fmt.Sprintf("Creating registry %s", opts.Config.Name), "created").Run(list, opts.namespace); len(errs) > 0 {
		return cmdutil.ErrExit
	}
	return nil
}
Example 12: Run
// Run creates the GroupSyncer specified and runs it to sync groups
// the arguments are only here because it's the only way to get the printer we need
func (o *SyncOptions) Run(cmd *cobra.Command, f *clientcmd.Factory) error {
	bindPassword, err := api.ResolveStringValue(o.Config.BindPassword)
	if err != nil {
		return err
	}
	clientConfig, err := ldaputil.NewLDAPClientConfig(o.Config.URL, o.Config.BindDN, bindPassword, o.Config.CA, o.Config.Insecure)
	if err != nil {
		return fmt.Errorf("could not determine LDAP client configuration: %v", err)
	}
	errorHandler := o.CreateErrorHandler()
	syncBuilder, err := buildSyncBuilder(clientConfig, o.Config, errorHandler)
	if err != nil {
		return err
	}
	// populate schema-independent syncer fields
	syncer := &syncgroups.LDAPGroupSyncer{
		Host: clientConfig.Host(),
		GroupClient: o.GroupInterface,
		DryRun: !o.Confirm,
		Out: o.Out,
		Err: os.Stderr,
	}
	switch o.Source {
	case GroupSyncSourceOpenShift:
		// when your source of ldapGroupUIDs is from an openshift group, the mapping of ldapGroupUID to openshift group name is logically
		// pinned by the existing mapping.
		listerMapper, err := getOpenShiftGroupListerMapper(clientConfig.Host(), o)
		if err != nil {
			return err
		}
		syncer.GroupLister = listerMapper
		syncer.GroupNameMapper = listerMapper
	case GroupSyncSourceLDAP:
		syncer.GroupLister, err = getLDAPGroupLister(syncBuilder, o)
		if err != nil {
			return err
		}
		syncer.GroupNameMapper, err = getGroupNameMapper(syncBuilder, o)
		if err != nil {
			return err
		}
	default:
		return fmt.Errorf("invalid group source: %v", o.Source)
	}
	syncer.GroupMemberExtractor, err = syncBuilder.GetGroupMemberExtractor()
	if err != nil {
		return err
	}
	syncer.UserNameMapper, err = syncBuilder.GetUserNameMapper()
	if err != nil {
		return err
	}
	// Now we run the Syncer and report any errors
	openshiftGroups, syncErrors := syncer.Sync()
	if o.Confirm {
		return kerrs.NewAggregate(syncErrors)
	}
	list := &kapi.List{}
	for _, item := range openshiftGroups {
		list.Items = append(list.Items, item)
	}
	mapper, _ := f.Object()
	fn := cmdutil.VersionedPrintObject(f.PrintObject, cmd, mapper, o.Out)
	if err := fn(list); err != nil {
		return err
	}
	return kerrs.NewAggregate(syncErrors)
}
Example 13: RunCmdRegistry
//......... the rest of the code is omitted here .........
				},
			},
			Volumes: append(volumes, kapi.Volume{
				Name: "registry-storage",
				VolumeSource: kapi.VolumeSource{},
			}),
			ServiceAccountName: cfg.ServiceAccount,
		},
	}
	if mountHost {
		podTemplate.Spec.Volumes[len(podTemplate.Spec.Volumes)-1].HostPath = &kapi.HostPathVolumeSource{Path: cfg.HostMount}
	} else {
		podTemplate.Spec.Volumes[len(podTemplate.Spec.Volumes)-1].EmptyDir = &kapi.EmptyDirVolumeSource{}
	}
	objects := []runtime.Object{}
	for _, s := range secrets {
		objects = append(objects, s)
	}
	if needServiceAccountRole {
		objects = append(objects,
			&kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: cfg.ServiceAccount}},
			&authapi.ClusterRoleBinding{
				ObjectMeta: kapi.ObjectMeta{Name: fmt.Sprintf("registry-%s-role", cfg.Name)},
				Subjects: []kapi.ObjectReference{
					{
						Kind: "ServiceAccount",
						Name: cfg.ServiceAccount,
						Namespace: namespace,
					},
				},
				RoleRef: kapi.ObjectReference{
					Kind: "ClusterRole",
					Name: "system:registry",
				},
			},
		)
	}
	if cfg.DaemonSet {
		objects = append(objects, &extensions.DaemonSet{
			ObjectMeta: kapi.ObjectMeta{
				Name: name,
				Labels: label,
			},
			Spec: extensions.DaemonSetSpec{
				Template: kapi.PodTemplateSpec{
					ObjectMeta: podTemplate.ObjectMeta,
					Spec: podTemplate.Spec,
				},
			},
		})
	} else {
		objects = append(objects, &deployapi.DeploymentConfig{
			ObjectMeta: kapi.ObjectMeta{
				Name: name,
				Labels: label,
			},
			Spec: deployapi.DeploymentConfigSpec{
				Replicas: cfg.Replicas,
				Selector: label,
				Triggers: []deployapi.DeploymentTriggerPolicy{
					{Type: deployapi.DeploymentTriggerOnConfigChange},
				},
				Template: podTemplate,
			},
		})
	}
	objects = app.AddServices(objects, true)
	// Set registry service's sessionAffinity to ClientIP to prevent push
	// failures due to a use of poorly consistent storage shared by
	// multiple replicas. Also reuse the cluster IP if provided to avoid
	// changing the internal value.
	for _, obj := range objects {
		switch t := obj.(type) {
		case *kapi.Service:
			t.Spec.SessionAffinity = kapi.ServiceAffinityClientIP
			t.Spec.ClusterIP = clusterIP
		}
	}
	// TODO: label all created objects with the same label
	list := &kapi.List{Items: objects}
	if cfg.Action.ShouldPrint() {
		mapper, _ := f.Object(false)
		fn := cmdutil.VersionedPrintObject(f.PrintObject, cmd, mapper, out)
		if err := fn(list); err != nil {
			return fmt.Errorf("unable to print object: %v", err)
		}
		return nil
	}
	if errs := cfg.Action.WithMessage(fmt.Sprintf("Creating registry %s", cfg.Name), "created").Run(list, namespace); len(errs) > 0 {
		return cmdutil.ErrExit
	}
	return nil
}
Example 14: RunCmdRouter
//......... the rest of the code is omitted here .........
	if cfg.StatsPort > 0 && cfg.ExposeMetrics {
		pc := generateMetricsExporterContainer(cfg, env)
		if pc != nil {
			containers = append(containers, *pc)
		}
	}
	objects := []runtime.Object{}
	for _, s := range secrets {
		objects = append(objects, s)
	}
	if createServiceAccount {
		objects = append(objects,
			&kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: cfg.ServiceAccount}},
			&authapi.ClusterRoleBinding{
				ObjectMeta: kapi.ObjectMeta{Name: fmt.Sprintf("router-%s-role", cfg.Name)},
				Subjects: []kapi.ObjectReference{
					{
						Kind: "ServiceAccount",
						Name: cfg.ServiceAccount,
						Namespace: namespace,
					},
				},
				RoleRef: kapi.ObjectReference{
					Kind: "ClusterRole",
					Name: "system:router",
				},
			},
		)
	}
	updatePercent := int(-25)
	objects = append(objects, &deployapi.DeploymentConfig{
		ObjectMeta: kapi.ObjectMeta{
			Name: name,
			Labels: label,
		},
		Spec: deployapi.DeploymentConfigSpec{
			Strategy: deployapi.DeploymentStrategy{
				Type: deployapi.DeploymentStrategyTypeRolling,
				RollingParams: &deployapi.RollingDeploymentStrategyParams{UpdatePercent: &updatePercent},
			},
			Replicas: cfg.Replicas,
			Selector: label,
			Triggers: []deployapi.DeploymentTriggerPolicy{
				{Type: deployapi.DeploymentTriggerOnConfigChange},
			},
			Template: &kapi.PodTemplateSpec{
				ObjectMeta: kapi.ObjectMeta{Labels: label},
				Spec: kapi.PodSpec{
					SecurityContext: &kapi.PodSecurityContext{
						HostNetwork: cfg.HostNetwork,
					},
					ServiceAccountName: cfg.ServiceAccount,
					NodeSelector: nodeSelector,
					Containers: containers,
					Volumes: volumes,
				},
			},
		},
	})
	objects = app.AddServices(objects, false)
	// set the service port to the provided output port value
	for i := range objects {
		switch t := objects[i].(type) {
		case *kapi.Service:
			for j, servicePort := range t.Spec.Ports {
				for _, targetPort := range ports {
					if targetPort.ContainerPort == servicePort.Port && targetPort.HostPort != 0 {
						t.Spec.Ports[j].Port = targetPort.HostPort
					}
				}
			}
		}
	}
	// TODO: label all created objects with the same label - router=<name>
	list := &kapi.List{Items: objects}
	if output {
		fn := cmdutil.VersionedPrintObject(f.PrintObject, cmd, out)
		if err := fn(list); err != nil {
			return fmt.Errorf("unable to print object: %v", err)
		}
		return defaultOutputErr
	}
	mapper, typer := f.Factory.Object()
	bulk := configcmd.Bulk{
		Mapper: mapper,
		Typer: typer,
		RESTClientFactory: f.Factory.ClientForMapping,
		After: configcmd.NewPrintNameOrErrorAfter(mapper, kcmdutil.GetFlagString(cmd, "output") == "name", "created", out, cmd.Out()),
	}
	if errs := bulk.Create(list, namespace); len(errs) != 0 {
		return cmdutil.ErrExit
	}
	return nil
}