This article collects typical usage examples of the Golang function RetryOnConflict from k8s.io/kubernetes/pkg/client/unversioned. If you are wondering what exactly RetryOnConflict does, how to call it, or want to see it used in real code, the curated examples below should help.
The following shows 15 code examples of the RetryOnConflict function, sorted by popularity by default. You can vote up the examples you like or find useful; your votes help the system recommend better Golang code examples.
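Before the project-specific examples, here is a minimal sketch of the general pattern, not taken from any of the projects below: RetryOnConflict accepts a wait.Backoff (DefaultBackoff or DefaultRetry from the same package) and a func() error that performs a read-modify-update cycle; the function is re-invoked only while it returns a Conflict error. The client variable, namespace, and pod name here are assumptions for illustration.

err := kclient.RetryOnConflict(kclient.DefaultBackoff, func() error {
	// Re-read the latest object on every attempt so a concurrent writer's
	// changes are picked up before we modify and update.
	pod, err := c.Pods("default").Get("example-pod")
	if err != nil {
		return err
	}
	if pod.Labels == nil {
		pod.Labels = map[string]string{}
	}
	pod.Labels["example"] = "true"
	_, err = c.Pods("default").Update(pod)
	// Returning a Conflict error triggers another attempt; any other error
	// (or nil) ends the retry loop immediately.
	return err
})
if err != nil {
	glog.Errorf("Update failed after retries: %v", err)
}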
Example 1: clearInitialNodeNetworkUnavailableCondition
// Because openshift-sdn uses an overlay and doesn't need GCE Routes, we need to
// clear the NetworkUnavailable condition that kubelet adds to initial node
// status when using GCE.
// TODO: make upstream kubelet more flexible with overlays and GCE so this
// condition doesn't get added for network plugins that don't want it, and then
// we can remove this function.
func (master *OsdnMaster) clearInitialNodeNetworkUnavailableCondition(node *kapi.Node) {
	knode := node
	cleared := false
	resultErr := kclient.RetryOnConflict(kclient.DefaultBackoff, func() error {
		var err error
		if knode != node {
			knode, err = master.kClient.Nodes().Get(node.ObjectMeta.Name)
			if err != nil {
				return err
			}
		}
		// Let caller modify knode's status, then push to api server.
		_, condition := kapi.GetNodeCondition(&knode.Status, kapi.NodeNetworkUnavailable)
		if condition != nil && condition.Status != kapi.ConditionFalse && condition.Reason == "NoRouteCreated" {
			condition.Status = kapi.ConditionFalse
			condition.Reason = "RouteCreated"
			condition.Message = "openshift-sdn cleared kubelet-set NoRouteCreated"
			condition.LastTransitionTime = kapiunversioned.Now()
			knode, err = master.kClient.Nodes().UpdateStatus(knode)
			if err == nil {
				cleared = true
			}
		}
		return err
	})
	if resultErr != nil {
		utilruntime.HandleError(fmt.Errorf("Status update failed for local node: %v", resultErr))
	} else if cleared {
		log.Infof("Cleared node NetworkUnavailable/NoRouteCreated condition for %s", node.ObjectMeta.Name)
	}
}
Example 2: updateRcWithRetries
// updateRcWithRetries retries updating the given rc on conflict with the following steps:
// 1. Get latest resource
// 2. applyUpdate
// 3. Update the resource
func updateRcWithRetries(c client.Interface, namespace string, rc *api.ReplicationController, applyUpdate updateRcFunc) (*api.ReplicationController, error) {
	// Deep copy the rc in case we failed on Get during retry loop
	obj, err := api.Scheme.Copy(rc)
	if err != nil {
		return nil, fmt.Errorf("failed to deep copy rc before updating it: %v", err)
	}
	oldRc := obj.(*api.ReplicationController)

	err = client.RetryOnConflict(client.DefaultBackoff, func() (e error) {
		// Apply the update, then attempt to push it to the apiserver.
		applyUpdate(rc)
		if rc, e = c.ReplicationControllers(namespace).Update(rc); e == nil {
			// rc contains the latest controller post update
			return
		}
		updateErr := e
		// Update the controller with the latest resource version, if the update failed we
		// can't trust rc so use oldRc.Name.
		if rc, e = c.ReplicationControllers(namespace).Get(oldRc.Name); e != nil {
			// The Get failed: Value in rc cannot be trusted.
			rc = oldRc
		}
		// Only return the error from update
		return updateErr
	})

	// If the error is non-nil the returned controller cannot be trusted, if it is nil, the returned
	// controller contains the applied update.
	return rc, err
}
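A hypothetical call site for the helper above; updateRcFunc is assumed to be func(*api.ReplicationController) based on how applyUpdate is invoked, and the client, namespace, and replica count are illustrative only:

// Scale an existing replication controller to 3 replicas, retrying on write conflicts.
scaled, err := updateRcWithRetries(c, "default", rc, func(rc *api.ReplicationController) {
	rc.Spec.Replicas = 3
})
if err != nil {
	return fmt.Errorf("failed to scale %q: %v", rc.Name, err)
}
fmt.Printf("controller %s now requests %d replicas\n", scaled.Name, scaled.Spec.Replicas)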
Example 3: reconcileDeployments
// reconcileDeployments reconciles existing deployment replica counts which
// could have diverged outside the deployment process (e.g. due to auto or
// manual scaling, or partial deployments). The active deployment is the last
// successful deployment, not necessarily the latest in terms of the config
// version. The active deployment replica count should follow the config, and
// all other deployments should be scaled to zero.
func (c *DeploymentConfigController) reconcileDeployments(existingDeployments []kapi.ReplicationController, config *deployapi.DeploymentConfig) error {
	activeDeployment := deployutil.ActiveDeployment(existingDeployments)

	// Reconcile deployments. The active deployment follows the config, and all
	// other deployments should be scaled to zero.
	var updatedDeployments []kapi.ReplicationController
	for i := range existingDeployments {
		deployment := existingDeployments[i]
		toAppend := deployment

		isActiveDeployment := activeDeployment != nil && deployment.Name == activeDeployment.Name

		oldReplicaCount := deployment.Spec.Replicas
		newReplicaCount := int32(0)
		if isActiveDeployment {
			newReplicaCount = config.Spec.Replicas
		}
		if config.Spec.Test {
			glog.V(4).Infof("Deployment config %q is test and deployment %q will be scaled down", deployutil.LabelForDeploymentConfig(config), deployutil.LabelForDeployment(&deployment))
			newReplicaCount = 0
		}

		// Only update if necessary.
		var copied *kapi.ReplicationController
		if newReplicaCount != oldReplicaCount {
			if err := kclient.RetryOnConflict(kclient.DefaultBackoff, func() error {
				// refresh the replication controller version
				rc, err := c.rcStore.ReplicationControllers(deployment.Namespace).Get(deployment.Name)
				if err != nil {
					return err
				}
				copied, err = deployutil.DeploymentDeepCopy(rc)
				if err != nil {
					glog.V(2).Infof("Deep copy of deployment %q failed: %v", rc.Name, err)
					return err
				}
				copied.Spec.Replicas = newReplicaCount
				copied, err = c.rn.ReplicationControllers(copied.Namespace).Update(copied)
				return err
			}); err != nil {
				c.recorder.Eventf(config, kapi.EventTypeWarning, "ReplicationControllerScaleFailed",
					"Failed to scale replication controller %q from %d to %d: %v", deployment.Name, oldReplicaCount, newReplicaCount, err)
				return err
			}

			c.recorder.Eventf(config, kapi.EventTypeNormal, "ReplicationControllerScaled", "Scaled replication controller %q from %d to %d", copied.Name, oldReplicaCount, newReplicaCount)
			toAppend = *copied
		}

		updatedDeployments = append(updatedDeployments, toAppend)
	}

	// As the deployment configuration has changed, we need to make sure to clean
	// up old deployments if we have now reached our deployment history quota
	if err := c.cleanupOldDeployments(updatedDeployments, config); err != nil {
		c.recorder.Eventf(config, kapi.EventTypeWarning, "ReplicationControllerCleanupFailed", "Couldn't clean up replication controllers: %v", err)
	}

	return c.updateStatus(config, updatedDeployments)
}
Example 4: addE2EServiceAccountsToSCC
func addE2EServiceAccountsToSCC(c *kclient.Client, namespaces *kapi.NamespaceList, sccName string) {
	err := kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		scc, err := c.SecurityContextConstraints().Get(sccName)
		if err != nil {
			if apierrs.IsNotFound(err) {
				return nil
			}
			return err
		}

		// Drop any previously added e2e groups, then grant access to the
		// service account groups of every current e2e namespace.
		groups := []string{}
		for _, name := range scc.Groups {
			if !strings.Contains(name, "e2e-") {
				groups = append(groups, name)
			}
		}
		for _, ns := range namespaces.Items {
			if strings.HasPrefix(ns.Name, "e2e-") {
				groups = append(groups, fmt.Sprintf("system:serviceaccounts:%s", ns.Name))
			}
		}
		scc.Groups = groups

		if _, err := c.SecurityContextConstraints().Update(scc); err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		FatalErr(err)
	}
}
Example 5: syncSecret
func (e *TokensController) syncSecret() {
	key, quit := e.syncSecretQueue.Get()
	if quit {
		return
	}
	defer e.syncSecretQueue.Done(key)

	// Track whether or not we should retry this sync
	retry := false
	defer func() {
		e.retryOrForget(e.syncSecretQueue, key, retry)
	}()

	secretInfo, err := parseSecretQueueKey(key)
	if err != nil {
		glog.Error(err)
		return
	}

	secret, err := e.getSecret(secretInfo.namespace, secretInfo.name, secretInfo.uid, false)
	switch {
	case err != nil:
		glog.Error(err)
		retry = true
	case secret == nil:
		// If the service account exists
		if sa, saErr := e.getServiceAccount(secretInfo.namespace, secretInfo.saName, secretInfo.saUID, false); saErr == nil && sa != nil {
			// secret no longer exists, so delete references to this secret from the service account
			if err := client.RetryOnConflict(RemoveTokenBackoff, func() error {
				return e.removeSecretReference(secretInfo.namespace, secretInfo.saName, secretInfo.saUID, secretInfo.name)
			}); err != nil {
				glog.Error(err)
			}
		}
	default:
		// Ensure service account exists
		sa, saErr := e.getServiceAccount(secretInfo.namespace, secretInfo.saName, secretInfo.saUID, true)
		switch {
		case saErr != nil:
			glog.Error(saErr)
			retry = true
		case sa == nil:
			// Delete token
			glog.V(4).Infof("syncSecret(%s/%s), service account does not exist, deleting token", secretInfo.namespace, secretInfo.name)
			if retriable, err := e.deleteToken(secretInfo.namespace, secretInfo.name, secretInfo.uid); err != nil {
				glog.Errorf("error deleting serviceaccount token %s/%s for service account %s: %v", secretInfo.namespace, secretInfo.name, secretInfo.saName, err)
				retry = retriable
			}
		default:
			// Update token if needed
			if retriable, err := e.generateTokenIfNeeded(sa, secret); err != nil {
				glog.Errorf("error populating serviceaccount token %s/%s for service account %s: %v", secretInfo.namespace, secretInfo.name, secretInfo.saName, err)
				retry = retriable
			}
		}
	}
}
Example 6: TestTriggers_manual
func TestTriggers_manual(t *testing.T) {
	testutil.DeleteAllEtcdKeys()
	openshift := NewTestDeployOpenshift(t)
	defer openshift.Close()

	config := deploytest.OkDeploymentConfig(0)
	config.Namespace = testutil.Namespace()
	config.Triggers = []deployapi.DeploymentTriggerPolicy{
		{
			Type: deployapi.DeploymentTriggerManual,
		},
	}

	dc, err := openshift.Client.DeploymentConfigs(testutil.Namespace()).Create(config)
	if err != nil {
		t.Fatalf("Couldn't create DeploymentConfig: %v %#v", err, config)
	}

	watch, err := openshift.KubeClient.ReplicationControllers(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), dc.ResourceVersion)
	if err != nil {
		t.Fatalf("Couldn't subscribe to Deployments: %v", err)
	}
	defer watch.Stop()

	retryErr := kclient.RetryOnConflict(wait.Backoff{Steps: maxUpdateRetries}, func() error {
		config, err := openshift.Client.DeploymentConfigs(testutil.Namespace()).Generate(config.Name)
		if err != nil {
			return err
		}
		if config.LatestVersion != 1 {
			t.Fatalf("Generated deployment should have version 1: %#v", config)
		}
		t.Logf("config(1): %#v", config)
		updatedConfig, err := openshift.Client.DeploymentConfigs(testutil.Namespace()).Update(config)
		if err != nil {
			return err
		}
		t.Logf("config(2): %#v", updatedConfig)
		return nil
	})
	if retryErr != nil {
		t.Fatal(retryErr)
	}

	event := <-watch.ResultChan()
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	deployment := event.Object.(*kapi.ReplicationController)

	if e, a := config.Name, deployutil.DeploymentConfigNameFor(deployment); e != a {
		t.Fatalf("Expected deployment annotated with deploymentConfig '%s', got '%s'", e, a)
	}
	if e, a := 1, deployutil.DeploymentVersionFor(deployment); e != a {
		t.Fatalf("Deployment annotation version does not match: %#v", deployment)
	}
}
Example 7: ensureOpenShiftInfraNamespace
// ensureOpenShiftInfraNamespace is called as part of global policy initialization to ensure infra namespace exists
func (c *MasterConfig) ensureOpenShiftInfraNamespace() {
	ns := c.Options.PolicyConfig.OpenShiftInfrastructureNamespace

	// Ensure namespace exists
	namespace, err := c.KubeClient().Namespaces().Create(&kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: ns}})
	if kapierror.IsAlreadyExists(err) {
		// Get the persisted namespace
		namespace, err = c.KubeClient().Namespaces().Get(ns)
		if err != nil {
			glog.Errorf("Error getting namespace %s: %v", ns, err)
			return
		}
	} else if err != nil {
		glog.Errorf("Error creating namespace %s: %v", ns, err)
		return
	}

	roleAccessor := policy.NewClusterRoleBindingAccessor(c.ServiceAccountRoleBindingClient())
	for _, saName := range bootstrappolicy.InfraSAs.GetServiceAccounts() {
		_, err := c.KubeClient().ServiceAccounts(ns).Create(&kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: saName}})
		if err != nil && !kapierror.IsAlreadyExists(err) {
			glog.Errorf("Error creating service account %s/%s: %v", ns, saName, err)
		}

		role, _ := bootstrappolicy.InfraSAs.RoleFor(saName)

		reconcileRole := &policy.ReconcileClusterRolesOptions{
			RolesToReconcile: []string{role.Name},
			Confirmed:        true,
			Union:            true,
			Out:              ioutil.Discard,
			RoleClient:       c.PrivilegedLoopbackOpenShiftClient.ClusterRoles(),
		}
		if err := reconcileRole.RunReconcileClusterRoles(nil, nil); err != nil {
			glog.Errorf("Could not reconcile %v: %v\n", role.Name, err)
		}

		addRole := &policy.RoleModificationOptions{
			RoleName:            role.Name,
			RoleBindingAccessor: roleAccessor,
			Subjects:            []kapi.ObjectReference{{Namespace: ns, Name: saName, Kind: "ServiceAccount"}},
		}
		if err := kclient.RetryOnConflict(kclient.DefaultRetry, func() error { return addRole.AddRole() }); err != nil {
			glog.Errorf("Could not add %v service accounts to the %v cluster role: %v\n", saName, role.Name, err)
		} else {
			glog.V(2).Infof("Added %v service accounts to the %v cluster role: %v\n", saName, role.Name, err)
		}
	}

	c.ensureNamespaceServiceAccountRoleBindings(namespace)
}
Example 8: retryOnConflictError
// retryOnConflictError retries the specified fn if there was a conflict error
func retryOnConflictError(kubeClient client.Interface, namespace *api.Namespace, fn updateNamespaceFunc) (result *api.Namespace, err error) {
	result = namespace
	err = client.RetryOnConflict(wait.Backoff{Steps: maxRetriesOnConflict}, func() error {
		if result == nil {
			if result, err = kubeClient.Namespaces().Get(namespace.Name); err != nil {
				return err
			}
		}
		if result, err = fn(kubeClient, result); err != nil {
			result = nil
		}
		return err
	})
	return
}
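A hypothetical call site; updateNamespaceFunc is assumed to be func(client.Interface, *api.Namespace) (*api.Namespace, error) based on how fn is invoked above, and the annotation key is illustrative only:

// Stamp an annotation on the namespace, retrying the whole read-modify-update on conflict.
updated, err := retryOnConflictError(kubeClient, ns, func(kubeClient client.Interface, namespace *api.Namespace) (*api.Namespace, error) {
	if namespace.Annotations == nil {
		namespace.Annotations = map[string]string{}
	}
	namespace.Annotations["example.io/processed"] = "true"
	return kubeClient.Namespaces().Update(namespace)
})
if err != nil {
	return err
}
glog.V(4).Infof("namespace %s updated to resource version %s", updated.Name, updated.ResourceVersion)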
Example 9: UpdateConfigWithRetries
// UpdateConfigWithRetries will try to update a deployment config and ignore any update conflicts.
func UpdateConfigWithRetries(dn DeploymentConfigsNamespacer, namespace, name string, applyUpdate updateConfigFunc) (*deployapi.DeploymentConfig, error) {
	var config *deployapi.DeploymentConfig

	resultErr := kclient.RetryOnConflict(kclient.DefaultBackoff, func() error {
		var err error
		config, err = dn.DeploymentConfigs(namespace).Get(name)
		if err != nil {
			return err
		}
		// Apply the update, then attempt to push it to the apiserver.
		applyUpdate(config)
		config, err = dn.DeploymentConfigs(namespace).Update(config)
		return err
	})

	return config, resultErr
}
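A hypothetical call site; updateConfigFunc is assumed to be func(*deployapi.DeploymentConfig) based on how applyUpdate is invoked, and the client, project, and config name are illustrative only:

// Bump the replica count on a deployment config, retrying on write conflicts.
config, err := UpdateConfigWithRetries(osClient, "myproject", "frontend", func(dc *deployapi.DeploymentConfig) {
	dc.Spec.Replicas = 2
})
if err != nil {
	return err
}
fmt.Printf("deployment config %s now requests %d replicas\n", config.Name, config.Spec.Replicas)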
Example 10: ensureOAuthClient
func ensureOAuthClient(client oauthapi.OAuthClient, clientRegistry clientregistry.Registry, preserveExistingRedirects bool) error {
	ctx := kapi.NewContext()
	_, err := clientRegistry.CreateClient(ctx, &client)
	if err == nil || !kerrs.IsAlreadyExists(err) {
		return err
	}

	return unversioned.RetryOnConflict(unversioned.DefaultRetry, func() error {
		existing, err := clientRegistry.GetClient(ctx, client.Name)
		if err != nil {
			return err
		}

		// Ensure the correct challenge setting
		existing.RespondWithChallenges = client.RespondWithChallenges
		// Preserve an existing client secret
		if len(existing.Secret) == 0 {
			existing.Secret = client.Secret
		}

		// Preserve redirects for clients other than the CLI client
		// The CLI client doesn't care about the redirect URL, just the token or error fragment
		if preserveExistingRedirects {
			// Add in any redirects from the existing one
			// This preserves any additional customized redirects in the default clients
			redirects := sets.NewString(client.RedirectURIs...)
			for _, redirect := range existing.RedirectURIs {
				if !redirects.Has(redirect) {
					client.RedirectURIs = append(client.RedirectURIs, redirect)
					redirects.Insert(redirect)
				}
			}
		}
		existing.RedirectURIs = client.RedirectURIs

		// If the GrantMethod is present, keep it for compatibility
		// If it is empty, assign the requested strategy.
		if len(existing.GrantMethod) == 0 {
			existing.GrantMethod = client.GrantMethod
		}

		_, err = clientRegistry.UpdateClient(ctx, existing)
		return err
	})
}
Example 11: createRoleBinding
func (m *VirtualStorage) createRoleBinding(ctx kapi.Context, obj runtime.Object, allowEscalation bool) (*authorizationapi.RoleBinding, error) {
	// Copy object before passing to BeforeCreate, since it mutates
	objCopy, err := kapi.Scheme.DeepCopy(obj)
	if err != nil {
		return nil, err
	}
	obj = objCopy.(runtime.Object)

	if err := rest.BeforeCreate(m.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}

	roleBinding := obj.(*authorizationapi.RoleBinding)

	if !allowEscalation {
		if err := m.confirmNoEscalation(ctx, roleBinding); err != nil {
			return nil, err
		}
	}

	// Retry if we hit a conflict on the underlying PolicyBinding object
	if err := kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		policyBinding, err := m.getPolicyBindingForPolicy(ctx, roleBinding.RoleRef.Namespace, allowEscalation)
		if err != nil {
			return err
		}

		_, exists := policyBinding.RoleBindings[roleBinding.Name]
		if exists {
			return kapierrors.NewAlreadyExists(authorizationapi.Resource("rolebinding"), roleBinding.Name)
		}

		roleBinding.ResourceVersion = policyBinding.ResourceVersion
		policyBinding.RoleBindings[roleBinding.Name] = roleBinding
		policyBinding.LastModified = unversioned.Now()

		return m.BindingRegistry.UpdatePolicyBinding(ctx, policyBinding)
	}); err != nil {
		return nil, err
	}

	return roleBinding, nil
}
Example 12: addRoleToE2EServiceAccounts
func addRoleToE2EServiceAccounts(c *client.Client, namespaces []kapi.Namespace, roleName string) {
	err := kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		// Grant the role to the default service account of every non-terminating e2e namespace.
		for _, ns := range namespaces {
			if strings.HasPrefix(ns.Name, "e2e-") && ns.Status.Phase != kapi.NamespaceTerminating {
				sa := fmt.Sprintf("system:serviceaccount:%s:default", ns.Name)
				addRole := &policy.RoleModificationOptions{
					RoleNamespace:       "",
					RoleName:            roleName,
					RoleBindingAccessor: policy.NewLocalRoleBindingAccessor(ns.Name, c),
					Users:               []string{sa},
				}
				if err := addRole.AddRole(); err != nil {
					e2e.Logf("Warning: Failed to add role to e2e service account: %v", err)
				}
			}
		}
		return nil
	})
	if err != nil {
		FatalErr(err)
	}
}
Example 13: createRole
func (m *VirtualStorage) createRole(ctx kapi.Context, obj runtime.Object, allowEscalation bool) (*authorizationapi.Role, error) {
	// Copy object before passing to BeforeCreate, since it mutates
	objCopy, err := kapi.Scheme.DeepCopy(obj)
	if err != nil {
		return nil, err
	}
	obj = objCopy.(runtime.Object)

	if err := rest.BeforeCreate(m.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}

	role := obj.(*authorizationapi.Role)
	if !allowEscalation {
		if err := rulevalidation.ConfirmNoEscalation(ctx, m.Resource, role.Name, m.RuleResolver, m.CachedRuleResolver, authorizationinterfaces.NewLocalRoleAdapter(role)); err != nil {
			return nil, err
		}
	}

	if err := kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		policy, err := m.EnsurePolicy(ctx)
		if err != nil {
			return err
		}
		if _, exists := policy.Roles[role.Name]; exists {
			return kapierrors.NewAlreadyExists(m.Resource, role.Name)
		}

		role.ResourceVersion = policy.ResourceVersion
		policy.Roles[role.Name] = role
		policy.LastModified = unversioned.Now()

		return m.PolicyStorage.UpdatePolicy(ctx, policy)
	}); err != nil {
		return nil, err
	}

	return role, nil
}
Example 14: retryBuildStatusUpdate
func retryBuildStatusUpdate(build *api.Build, client client.BuildInterface, sourceRev *api.SourceRevision) error {
	return kclient.RetryOnConflict(kclient.DefaultBackoff, func() error {
		// Before updating, make sure we are using the latest version of the build.
		latestBuild, err := client.Get(build.Name)
		if err != nil {
			// Usually this means we failed to get the resource due to missing privileges.
			return err
		}

		if sourceRev != nil {
			latestBuild.Spec.Revision = sourceRev
			latestBuild.ResourceVersion = ""
		}

		latestBuild.Status.Reason = build.Status.Reason
		latestBuild.Status.Message = build.Status.Message

		if _, err := client.UpdateDetails(latestBuild); err != nil {
			return err
		}
		return nil
	})
}
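A hypothetical call site from inside a build process; the status reason string, message, and error handling are illustrative only:

// Record why the build failed and push the update, retrying on conflicts.
build.Status.Reason = "FetchSourceFailed"
build.Status.Message = "failed to fetch the source from the git repository"
if err := retryBuildStatusUpdate(build, buildClient, nil); err != nil {
	glog.Errorf("failed to update build %s status: %v", build.Name, err)
}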
Example 15: secretDeleted
// secretDeleted reacts to a Secret being deleted by removing a reference from the corresponding ServiceAccount if needed
func (e *TokensController) secretDeleted(obj interface{}) {
	secret, ok := obj.(*api.Secret)
	if !ok {
		// Unknown type. If we missed a Secret deletion, the corresponding ServiceAccount (if it exists)
		// will get a secret recreated (if needed) during the ServiceAccount re-list
		return
	}

	serviceAccount, err := e.getServiceAccount(secret, false)
	if err != nil {
		glog.Error(err)
		return
	}
	if serviceAccount == nil {
		return
	}

	if err := client.RetryOnConflict(RemoveTokenBackoff, func() error {
		return e.removeSecretReferenceIfNeeded(serviceAccount, secret.Name)
	}); err != nil {
		util.HandleError(err)
	}
}