This article collects typical usage examples of the HandleError function from k8s.io/kubernetes/pkg/util in Go. If you are unsure how HandleError works, what it is for, or what calling it looks like in practice, the curated code samples below may help.
The following presents 15 code examples of the HandleError function, ordered by popularity.
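Before the examples, here is a minimal sketch of the pattern they all share: wrap a non-fatal error with context via fmt.Errorf and hand it to util.HandleError instead of returning it, so that long-running loops and callbacks report the problem and keep going. Only util.HandleError and its import path come from this article; the processItem and processAll helpers and their error messages are invented for illustration.

// Minimal sketch of the HandleError pattern (helper names are hypothetical).
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util"
)

func processItem(item string) error {
	if item == "" {
		return fmt.Errorf("empty item")
	}
	return nil
}

func processAll(items []string) {
	for _, item := range items {
		if err := processItem(item); err != nil {
			// Report the error through the shared error handlers (logged by default)
			// and continue with the next item instead of aborting the whole loop.
			util.HandleError(fmt.Errorf("unable to process item %q: %v", item, err))
			continue
		}
	}
}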
Example 1: sendDelete
func (w *etcdWatcher) sendDelete(res *etcd.Response) {
	if res.PrevNode == nil {
		util.HandleError(fmt.Errorf("unexpected nil prev node: %#v", res))
		return
	}
	if w.include != nil && !w.include(res.PrevNode.Key) {
		return
	}
	node := *res.PrevNode
	if res.Node != nil {
		// Note that this sends the *old* object with the etcd index for the time at
		// which it gets deleted. This will allow users to restart the watch at the right
		// index.
		node.ModifiedIndex = res.Node.ModifiedIndex
	}
	obj, err := w.decodeObject(&node)
	if err != nil {
		util.HandleError(fmt.Errorf("failure to decode api object: %v\nfrom %#v %#v", err, res, res.Node))
		// TODO: expose an error through watch.Interface?
		// Ignore this value. If we stop the watch on a bad value, a client that uses
		// the resourceVersion to resume will never be able to get past a bad value.
		return
	}
	if !w.filter(obj) {
		return
	}
	w.emit(watch.Event{
		Type:   watch.Deleted,
		Object: obj,
	})
}
Example 2: UpgradeResponse
// UpgradeResponse upgrades an HTTP response to one that supports multiplexed
// streams. newStreamHandler will be called synchronously whenever the
// other end of the upgraded connection creates a new stream.
func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection {
	connectionHeader := strings.ToLower(req.Header.Get(httpstream.HeaderConnection))
	upgradeHeader := strings.ToLower(req.Header.Get(httpstream.HeaderUpgrade))
	if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
		w.WriteHeader(http.StatusBadRequest)
		fmt.Fprintf(w, "unable to upgrade: missing upgrade headers in request: %#v", req.Header)
		return nil
	}
	hijacker, ok := w.(http.Hijacker)
	if !ok {
		w.WriteHeader(http.StatusInternalServerError)
		fmt.Fprintf(w, "unable to upgrade: unable to hijack response")
		return nil
	}
	w.Header().Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
	w.Header().Add(httpstream.HeaderUpgrade, HeaderSpdy31)
	w.WriteHeader(http.StatusSwitchingProtocols)
	conn, _, err := hijacker.Hijack()
	if err != nil {
		util.HandleError(fmt.Errorf("unable to upgrade: error hijacking response: %v", err))
		return nil
	}
	spdyConn, err := NewServerConnection(conn, newStreamHandler)
	if err != nil {
		util.HandleError(fmt.Errorf("unable to upgrade: error creating SPDY server connection: %v", err))
		return nil
	}
	return spdyConn
}
Example 3: decodeObject
func (w *etcdWatcher) decodeObject(node *etcd.Node) (runtime.Object, error) {
	if obj, found := w.cache.getFromCache(node.ModifiedIndex, storage.Everything); found {
		return obj, nil
	}
	obj, err := runtime.Decode(w.encoding, []byte(node.Value))
	if err != nil {
		return nil, err
	}
	// ensure resource version is set on the object we load from etcd
	if w.versioner != nil {
		if err := w.versioner.UpdateObject(obj, node.Expiration, node.ModifiedIndex); err != nil {
			util.HandleError(fmt.Errorf("failure to version api object (%d) %#v: %v", node.ModifiedIndex, obj, err))
		}
	}
	// perform any necessary transformation
	if w.transform != nil {
		obj, err = w.transform(obj)
		if err != nil {
			util.HandleError(fmt.Errorf("failure to transform api object %#v: %v", obj, err))
			return nil, err
		}
	}
	if node.ModifiedIndex != 0 {
		w.cache.addToCache(node.ModifiedIndex, obj)
	}
	return obj, nil
}
Example 4: sendAdd
func (w *etcdWatcher) sendAdd(res *etcd.Response) {
	if res.Node == nil {
		util.HandleError(fmt.Errorf("unexpected nil node: %#v", res))
		return
	}
	if w.include != nil && !w.include(res.Node.Key) {
		return
	}
	obj, err := w.decodeObject(res.Node)
	if err != nil {
		util.HandleError(fmt.Errorf("failure to decode api object: %v\n'%v' from %#v %#v", err, string(res.Node.Value), res, res.Node))
		// TODO: expose an error through watch.Interface?
		// Ignore this value. If we stop the watch on a bad value, a client that uses
		// the resourceVersion to resume will never be able to get past a bad value.
		return
	}
	if !w.filter(obj) {
		return
	}
	action := watch.Added
	if res.Node.ModifiedIndex != res.Node.CreatedIndex {
		action = watch.Modified
	}
	w.emit(watch.Event{
		Type:   action,
		Object: obj,
	})
}
Example 5: invalidateCache
// invalidateCache returns true if there was a change in the cluster namespace that holds cluster policy and policy bindings
func (ac *AuthorizationCache) invalidateCache() bool {
	invalidateCache := false
	clusterPolicyList, err := ac.policyClient.ReadOnlyClusterPolicies().List(nil)
	if err != nil {
		util.HandleError(err)
		return invalidateCache
	}
	temporaryVersions := sets.NewString()
	for _, clusterPolicy := range clusterPolicyList.Items {
		temporaryVersions.Insert(clusterPolicy.ResourceVersion)
	}
	if (len(ac.clusterPolicyResourceVersions) != len(temporaryVersions)) || !ac.clusterPolicyResourceVersions.HasAll(temporaryVersions.List()...) {
		invalidateCache = true
		ac.clusterPolicyResourceVersions = temporaryVersions
	}
	clusterPolicyBindingList, err := ac.policyClient.ReadOnlyClusterPolicyBindings().List(nil)
	if err != nil {
		util.HandleError(err)
		return invalidateCache
	}
	temporaryVersions.Delete(temporaryVersions.List()...)
	for _, clusterPolicyBinding := range clusterPolicyBindingList.Items {
		temporaryVersions.Insert(clusterPolicyBinding.ResourceVersion)
	}
	if (len(ac.clusterBindingResourceVersions) != len(temporaryVersions)) || !ac.clusterBindingResourceVersions.HasAll(temporaryVersions.List()...) {
		invalidateCache = true
		ac.clusterBindingResourceVersions = temporaryVersions
	}
	return invalidateCache
}
Example 6: PrepareForUpdate
// PrepareForUpdate clears fields that are not allowed to be set by end users on update.
// It extracts the latest info from the manifest and sets that on the object. It allows a user
// to update the manifest so that it matches the digest (in case an older server stored a manifest
// that was malformed, it can always be corrected).
func (imageStrategy) PrepareForUpdate(obj, old runtime.Object) {
	newImage := obj.(*api.Image)
	oldImage := old.(*api.Image)
	// image metadata cannot be altered
	newImage.DockerImageReference = oldImage.DockerImageReference
	newImage.DockerImageMetadata = oldImage.DockerImageMetadata
	newImage.DockerImageMetadataVersion = oldImage.DockerImageMetadataVersion
	newImage.DockerImageLayers = oldImage.DockerImageLayers
	// allow an image update that results in the manifest matching the digest (the name)
	newManifest := newImage.DockerImageManifest
	newImage.DockerImageManifest = oldImage.DockerImageManifest
	if newManifest != oldImage.DockerImageManifest && len(newManifest) > 0 {
		ok, err := api.ManifestMatchesImage(oldImage, []byte(newManifest))
		if err != nil {
			util.HandleError(fmt.Errorf("attempted to validate that a manifest change to %q matched the signature, but failed: %v", oldImage.Name, err))
		} else if ok {
			newImage.DockerImageManifest = newManifest
		}
	}
	if err := api.ImageWithMetadata(newImage); err != nil {
		util.HandleError(fmt.Errorf("Unable to update image metadata for %q: %v", newImage.Name, err))
	}
}
Example 7: NewNamespaceController
// NewNamespaceController creates a new NamespaceController
func NewNamespaceController(kubeClient client.Interface, experimentalMode bool, resyncPeriod time.Duration) *NamespaceController {
	var controller *framework.Controller
	_, controller = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				namespace := obj.(*api.Namespace)
				if err := syncNamespace(kubeClient, experimentalMode, *namespace); err != nil {
					if estimate, ok := err.(*contentRemainingError); ok {
						go func() {
							// Estimate is the aggregate total of TerminationGracePeriodSeconds, which defaults to 30s
							// for pods. However, most processes will terminate faster - within a few seconds, probably
							// with a peak within 5-10s. So this division is a heuristic that avoids waiting the full
							// duration when in many cases things complete more quickly. The extra second added is to
							// ensure we never wait 0 seconds.
							t := estimate.Estimate/2 + 1
							glog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", namespace.Name, t)
							time.Sleep(time.Duration(t) * time.Second)
							if err := controller.Requeue(namespace); err != nil {
								util.HandleError(err)
							}
						}()
						return
					}
					util.HandleError(err)
				}
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				namespace := newObj.(*api.Namespace)
				if err := syncNamespace(kubeClient, experimentalMode, *namespace); err != nil {
					if estimate, ok := err.(*contentRemainingError); ok {
						go func() {
							t := estimate.Estimate/2 + 1
							glog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", namespace.Name, t)
							time.Sleep(time.Duration(t) * time.Second)
							if err := controller.Requeue(namespace); err != nil {
								util.HandleError(err)
							}
						}()
						return
					}
					util.HandleError(err)
				}
			},
		},
	)
	return &NamespaceController{
		controller: controller,
	}
}
Example 8: listAndWatch
func (r *Reflector) listAndWatch(stopCh <-chan struct{}) {
	var resourceVersion string
	resyncCh, cleanup := r.resyncChan()
	defer cleanup()
	list, err := r.listerWatcher.List()
	if err != nil {
		util.HandleError(fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err))
		return
	}
	meta, err := meta.Accessor(list)
	if err != nil {
		util.HandleError(fmt.Errorf("%s: Unable to understand list result %#v", r.name, list))
		return
	}
	resourceVersion = meta.ResourceVersion()
	items, err := runtime.ExtractList(list)
	if err != nil {
		util.HandleError(fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err))
		return
	}
	if err := r.syncWith(items); err != nil {
		util.HandleError(fmt.Errorf("%s: Unable to sync list result: %v", r.name, err))
		return
	}
	r.setLastSyncResourceVersion(resourceVersion)
	for {
		w, err := r.listerWatcher.Watch(resourceVersion)
		if err != nil {
			switch err {
			case io.EOF:
				// watch closed normally
			case io.ErrUnexpectedEOF:
				glog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err)
			default:
				util.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err))
			}
			// If this is "connection refused" error, it means that most likely apiserver is not responsive.
			// It doesn't make sense to re-list all objects because most likely we will be able to restart
			// watch where we ended.
			// If that's the case wait and resend watch request.
			if urlError, ok := err.(*url.Error); ok {
				if opError, ok := urlError.Err.(*net.OpError); ok {
					if errno, ok := opError.Err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED {
						time.Sleep(time.Second)
						continue
					}
				}
			}
			return
		}
		if err := r.watchHandler(w, &resourceVersion, resyncCh, stopCh); err != nil {
			if err != errorResyncRequested && err != errorStopRequested {
				glog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
			}
			return
		}
	}
}
Example 9: watchHandler
// watchHandler watches w and keeps *resourceVersion up to date.
func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, resyncCh <-chan time.Time, stopCh <-chan struct{}) error {
	start := time.Now()
	eventCount := 0
	// Stopping the watcher should be idempotent and if we return from this function there's no way
	// we're coming back in with the same watch interface.
	defer w.Stop()
loop:
	for {
		select {
		case <-stopCh:
			return errorStopRequested
		case <-resyncCh:
			return errorResyncRequested
		case event, ok := <-w.ResultChan():
			if !ok {
				break loop
			}
			if event.Type == watch.Error {
				return apierrs.FromObject(event.Object)
			}
			if e, a := r.expectedType, reflect.TypeOf(event.Object); e != nil && e != a {
				util.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a))
				continue
			}
			meta, err := meta.Accessor(event.Object)
			if err != nil {
				util.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
				continue
			}
			newResourceVersion := meta.ResourceVersion()
			switch event.Type {
			case watch.Added:
				r.store.Add(event.Object)
			case watch.Modified:
				r.store.Update(event.Object)
			case watch.Deleted:
				// TODO: Will any consumers need access to the "last known
				// state", which is passed in event.Object? If so, may need
				// to change this.
				r.store.Delete(event.Object)
			default:
				util.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
			}
			*resourceVersion = newResourceVersion
			r.setLastSyncResourceVersion(newResourceVersion)
			eventCount++
		}
	}
	watchDuration := time.Now().Sub(start)
	if watchDuration < 1*time.Second && eventCount == 0 {
		glog.V(4).Infof("%s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
		return errors.New("very short watch")
	}
	glog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
	return nil
}
Example 10: Run
// Run starts an asynchronous loop that monitors the status of cluster nodes.
func (nc *NodeController) Run(period time.Duration) {
	// Incorporate the results of node status pushed from kubelet to master.
	go util.Until(func() {
		if err := nc.monitorNodeStatus(); err != nil {
			glog.Errorf("Error monitoring node status: %v", err)
		}
	}, nc.nodeMonitorPeriod, util.NeverStop)
	// Managing eviction of nodes:
	// 1. when we delete pods off a node, if the node was not empty at the time we then
	//    queue a termination watcher
	//    a. If we hit an error, retry deletion
	// 2. The terminator loop ensures that pods are eventually cleaned and we never
	//    terminate a pod in a time period less than nc.maximumGracePeriod. AddedAt
	//    is the time from which we measure "has this pod been terminating too long",
	//    after which we will delete the pod with grace period 0 (force delete).
	//    a. If we hit errors, retry instantly
	//    b. If there are no pods left terminating, exit
	//    c. If there are pods still terminating, wait for their estimated completion
	//       before retrying
	go util.Until(func() {
		nc.podEvictor.Try(func(value TimedValue) (bool, time.Duration) {
			remaining, err := nc.deletePods(value.Value)
			if err != nil {
				util.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err))
				return false, 0
			}
			if remaining {
				nc.terminationEvictor.Add(value.Value)
			}
			return true, 0
		})
	}, nodeEvictionPeriod, util.NeverStop)
	// TODO: replace with a controller that ensures pods that are terminating complete
	// in a particular time period
	go util.Until(func() {
		nc.terminationEvictor.Try(func(value TimedValue) (bool, time.Duration) {
			completed, remaining, err := nc.terminatePods(value.Value, value.AddedAt)
			if err != nil {
				util.HandleError(fmt.Errorf("unable to terminate pods on node %q: %v", value.Value, err))
				return false, 0
			}
			if completed {
				glog.Infof("All pods terminated on %s", value.Value)
				nc.recordNodeEvent(value.Value, "TerminatedAllPods", fmt.Sprintf("Terminated all Pods on Node %s.", value.Value))
				return true, 0
			}
			glog.V(2).Infof("Pods terminating since %s on %q, estimated completion %s", value.AddedAt, value.Value, remaining)
			// clamp very short intervals
			if remaining < nodeEvictionPeriod {
				remaining = nodeEvictionPeriod
			}
			return false, remaining
		})
	}, nodeEvictionPeriod, util.NeverStop)
}
Example 11: manageReplicas
// manageReplicas checks and updates replicas for the given replication controller.
func (rm *ReplicationManager) manageReplicas(filteredPods []*api.Pod, rc *api.ReplicationController) {
	diff := len(filteredPods) - rc.Spec.Replicas
	rcKey, err := controller.KeyFunc(rc)
	if err != nil {
		glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err)
		return
	}
	if diff < 0 {
		diff *= -1
		if diff > rm.burstReplicas {
			diff = rm.burstReplicas
		}
		rm.expectations.ExpectCreations(rcKey, diff)
		wait := sync.WaitGroup{}
		wait.Add(diff)
		glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rc.Namespace, rc.Name, rc.Spec.Replicas, diff)
		for i := 0; i < diff; i++ {
			go func() {
				defer wait.Done()
				if err := rm.podControl.CreatePods(rc.Namespace, rc.Spec.Template, rc); err != nil {
					// Decrement the expected number of creates because the informer won't observe this pod
					glog.V(2).Infof("Failed creation, decrementing expectations for controller %q/%q", rc.Namespace, rc.Name)
					rm.expectations.CreationObserved(rcKey)
					util.HandleError(err)
				}
			}()
		}
		wait.Wait()
	} else if diff > 0 {
		if diff > rm.burstReplicas {
			diff = rm.burstReplicas
		}
		rm.expectations.ExpectDeletions(rcKey, diff)
		glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rc.Namespace, rc.Name, rc.Spec.Replicas, diff)
		// No need to sort pods if we are about to delete all of them
		if rc.Spec.Replicas != 0 {
			// Sort the pods in the order such that not-ready < ready, unscheduled
			// < scheduled, and pending < running. This ensures that we delete pods
			// in the earlier stages whenever possible.
			sort.Sort(controller.ActivePods(filteredPods))
		}
		wait := sync.WaitGroup{}
		wait.Add(diff)
		for i := 0; i < diff; i++ {
			go func(ix int) {
				defer wait.Done()
				if err := rm.podControl.DeletePod(rc.Namespace, filteredPods[ix].Name); err != nil {
					// Decrement the expected number of deletes because the informer won't observe this deletion
					glog.V(2).Infof("Failed deletion, decrementing expectations for controller %q/%q", rc.Namespace, rc.Name)
					rm.expectations.DeletionObserved(rcKey)
					util.HandleError(err)
				}
			}(i)
		}
		wait.Wait()
	}
}
Example 12: Create
// Create creates a DeploymentConfigController.
func (factory *DeploymentConfigControllerFactory) Create() controller.RunnableController {
	deploymentConfigLW := &cache.ListWatch{
		ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).List(options)
		},
		WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).Watch(options)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentConfigLW, &deployapi.DeploymentConfig{}, queue, 2*time.Minute).Run()
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))
	recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deploymentconfig-controller"})
	configController := NewDeploymentConfigController(factory.KubeClient, factory.Client, factory.Codec, recorder)
	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				config := obj.(*deployapi.DeploymentConfig)
				// no retries for a fatal error
				if _, isFatal := err.(fatalError); isFatal {
					glog.V(4).Infof("Will not retry fatal error for deploymentConfig %s/%s: %v", config.Namespace, config.Name, err)
					kutil.HandleError(err)
					return false
				}
				// infinite retries for a transient error
				if _, isTransient := err.(transientError); isTransient {
					glog.V(4).Infof("Retrying deploymentConfig %s/%s with error: %v", config.Namespace, config.Name, err)
					return true
				}
				kutil.HandleError(err)
				// no retries for anything else
				if retries.Count > 0 {
					return false
				}
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			config := obj.(*deployapi.DeploymentConfig)
			return configController.Handle(config)
		},
	}
}
Example 13: HandleEndpoints
// HandleEndpoints handles a single Endpoints event and refreshes the router backend.
func (c *RouterController) HandleEndpoints() {
	eventType, endpoints, err := c.NextEndpoints()
	if err != nil {
		util.HandleError(fmt.Errorf("unable to read endpoints: %v", err))
		return
	}
	c.lock.Lock()
	defer c.lock.Unlock()
	if err := c.Plugin.HandleEndpoints(eventType, endpoints); err != nil {
		util.HandleError(err)
	}
}
Example 14: NewServiceGroup
// NewServiceGroup returns the ServiceGroup and a set of all the NodeIDs covered by the service
func NewServiceGroup(g osgraph.Graph, serviceNode *kubegraph.ServiceNode) (ServiceGroup, IntSet) {
	covered := IntSet{}
	covered.Insert(serviceNode.ID())
	service := ServiceGroup{}
	service.Service = serviceNode
	for _, uncastServiceFulfiller := range g.PredecessorNodesByEdgeKind(serviceNode, kubeedges.ExposedThroughServiceEdgeKind) {
		container := osgraph.GetTopLevelContainerNode(g, uncastServiceFulfiller)
		switch castContainer := container.(type) {
		case *deploygraph.DeploymentConfigNode:
			service.FulfillingDCs = append(service.FulfillingDCs, castContainer)
		case *kubegraph.ReplicationControllerNode:
			service.FulfillingRCs = append(service.FulfillingRCs, castContainer)
		case *kubegraph.PodNode:
			service.FulfillingPods = append(service.FulfillingPods, castContainer)
		default:
			util.HandleError(fmt.Errorf("unrecognized container: %v", castContainer))
		}
	}
	for _, uncastServiceFulfiller := range g.PredecessorNodesByEdgeKind(serviceNode, routeedges.ExposedThroughRouteEdgeKind) {
		container := osgraph.GetTopLevelContainerNode(g, uncastServiceFulfiller)
		switch castContainer := container.(type) {
		case *routegraph.RouteNode:
			service.ExposingRoutes = append(service.ExposingRoutes, castContainer)
		default:
			util.HandleError(fmt.Errorf("unrecognized container: %v", castContainer))
		}
	}
	// add the DCPipelines for all the DCs that fulfill the service
	for _, fulfillingDC := range service.FulfillingDCs {
		dcPipeline, dcCovers := NewDeploymentConfigPipeline(g, fulfillingDC)
		covered.Insert(dcCovers.List()...)
		service.DeploymentConfigPipelines = append(service.DeploymentConfigPipelines, dcPipeline)
	}
	for _, fulfillingRC := range service.FulfillingRCs {
		rcView, rcCovers := NewReplicationController(g, fulfillingRC)
		covered.Insert(rcCovers.List()...)
		service.ReplicationControllers = append(service.ReplicationControllers, rcView)
	}
	return service, covered
}
Example 15: handleLocationChange
// handleLocationChange goes through all service account dockercfg secrets and updates them to point at a new docker-registry location
func (e *DockerRegistryServiceController) handleLocationChange(serviceLocation string) error {
	e.dockercfgController.SetDockerURL(serviceLocation)
	dockercfgSecrets, err := e.listDockercfgSecrets()
	if err != nil {
		return err
	}
	for _, dockercfgSecret := range dockercfgSecrets {
		dockercfg := &credentialprovider.DockerConfig{}
		if err := json.Unmarshal(dockercfgSecret.Data[api.DockerConfigKey], dockercfg); err != nil {
			util.HandleError(err)
			continue
		}
		dockercfgMap := map[string]credentialprovider.DockerConfigEntry(*dockercfg)
		keys := util.KeySet(reflect.ValueOf(dockercfgMap))
		if len(keys) != 1 {
			// Report a descriptive error; the outer err is nil at this point, so passing it
			// to HandleError would silently drop the problem.
			util.HandleError(fmt.Errorf("expected exactly one key in dockercfg secret %s/%s, got %d", dockercfgSecret.Namespace, dockercfgSecret.Name, len(keys)))
			continue
		}
		oldKey := keys.List()[0]
		// if there's no change, skip
		if oldKey == serviceLocation {
			continue
		}
		dockercfgMap[serviceLocation] = dockercfgMap[oldKey]
		delete(dockercfgMap, oldKey)
		t := credentialprovider.DockerConfig(dockercfgMap)
		dockercfg = &t
		dockercfgContent, err := json.Marshal(dockercfg)
		if err != nil {
			util.HandleError(err)
			continue
		}
		dockercfgSecret.Data[api.DockerConfigKey] = dockercfgContent
		if _, err := e.client.Secrets(dockercfgSecret.Namespace).Update(dockercfgSecret); err != nil {
			util.HandleError(err)
			continue
		}
	}
	// per-secret failures were already reported via HandleError above
	return nil
}