This article collects typical usage examples of the Golang function util.HandleError from github.com/GoogleCloudPlatform/kubernetes/pkg/util. If you are wondering what HandleError does, how to use it, and what calling it looks like in practice, the hand-picked examples below should help.
A total of 15 code examples of the HandleError function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Golang examples.
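Before the examples, here is a minimal, self-contained sketch of the call pattern they all share: wrap the underlying error with fmt.Errorf to add context, pass it to util.HandleError, and then continue or return rather than panicking. The doWork helper below is hypothetical and exists only for illustration; the import path is the legacy GoogleCloudPlatform/kubernetes one named above, and the exact reporting behavior of HandleError (typically logging) is assumed rather than shown here.
package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)

// doWork is a hypothetical helper that may fail; it stands in for any
// operation performed inside a controller or sync loop.
func doWork() error {
	return fmt.Errorf("temporary failure")
}

func main() {
	// As in the examples below, HandleError is used for errors that should be
	// reported but must not abort the surrounding control loop.
	if err := doWork(); err != nil {
		util.HandleError(fmt.Errorf("unable to complete work: %v", err))
	}
}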
Example 1: createReplica
func (r RealPodControl) createReplica(namespace string, controller api.ReplicationController) {
	desiredLabels := make(labels.Set)
	for k, v := range controller.Spec.Template.Labels {
		desiredLabels[k] = v
	}
	desiredAnnotations := make(labels.Set)
	for k, v := range controller.Spec.Template.Annotations {
		desiredAnnotations[k] = v
	}
	// use the dash (if the name isn't too long) to make the pod name a bit prettier
	prefix := fmt.Sprintf("%s-", controller.Name)
	if ok, _ := validation.ValidatePodName(prefix, true); !ok {
		prefix = controller.Name
	}
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Labels:       desiredLabels,
			Annotations:  desiredAnnotations,
			GenerateName: prefix,
		},
	}
	if err := api.Scheme.Convert(&controller.Spec.Template.Spec, &pod.Spec); err != nil {
		util.HandleError(fmt.Errorf("unable to convert pod template: %v", err))
		return
	}
	if labels.Set(pod.Labels).AsSelector().Empty() {
		util.HandleError(fmt.Errorf("unable to create pod replica, no labels"))
		return
	}
	if _, err := r.kubeClient.Pods(namespace).Create(pod); err != nil {
		util.HandleError(fmt.Errorf("unable to create pod replica: %v", err))
	}
}
Example 2: invalidateCache
// invalidateCache returns true if there was a change in the cluster namespace that holds cluster policy and policy bindings
func (ac *AuthorizationCache) invalidateCache() bool {
	invalidateCache := false
	clusterPolicyList, err := ac.policyClient.ReadOnlyClusterPolicies().List(labels.Everything(), fields.Everything())
	if err != nil {
		util.HandleError(err)
		return invalidateCache
	}
	temporaryVersions := util.NewStringSet()
	for _, clusterPolicy := range clusterPolicyList.Items {
		temporaryVersions.Insert(clusterPolicy.ResourceVersion)
	}
	if (len(ac.clusterPolicyResourceVersions) != len(temporaryVersions)) || !ac.clusterPolicyResourceVersions.HasAll(temporaryVersions.List()...) {
		invalidateCache = true
		ac.clusterPolicyResourceVersions = temporaryVersions
	}
	clusterPolicyBindingList, err := ac.policyClient.ReadOnlyClusterPolicyBindings().List(labels.Everything(), fields.Everything())
	if err != nil {
		util.HandleError(err)
		return invalidateCache
	}
	temporaryVersions.Delete(temporaryVersions.List()...)
	for _, clusterPolicyBinding := range clusterPolicyBindingList.Items {
		temporaryVersions.Insert(clusterPolicyBinding.ResourceVersion)
	}
	if (len(ac.clusterBindingResourceVersions) != len(temporaryVersions)) || !ac.clusterBindingResourceVersions.HasAll(temporaryVersions.List()...) {
		invalidateCache = true
		ac.clusterBindingResourceVersions = temporaryVersions
	}
	return invalidateCache
}
Example 3: listAndWatch
func (r *Reflector) listAndWatch(stopCh <-chan struct{}) {
	var resourceVersion string
	resyncCh, cleanup := r.resyncChan()
	defer cleanup()
	list, err := r.listerWatcher.List()
	if err != nil {
		util.HandleError(fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err))
		return
	}
	meta, err := meta.Accessor(list)
	if err != nil {
		util.HandleError(fmt.Errorf("%s: Unable to understand list result %#v", r.name, list))
		return
	}
	resourceVersion = meta.ResourceVersion()
	items, err := runtime.ExtractList(list)
	if err != nil {
		util.HandleError(fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err))
		return
	}
	if err := r.syncWith(items); err != nil {
		util.HandleError(fmt.Errorf("%s: Unable to sync list result: %v", r.name, err))
		return
	}
	r.setLastSyncResourceVersion(resourceVersion)
	for {
		w, err := r.listerWatcher.Watch(resourceVersion)
		if err != nil {
			switch err {
			case io.EOF:
				// watch closed normally
			case io.ErrUnexpectedEOF:
				glog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err)
			default:
				util.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err))
			}
			// If this is "connection refused" error, it means that most likely apiserver is not responsive.
			// It doesn't make sense to re-list all objects because most likely we will be able to restart
			// watch where we ended.
			// If that's the case wait and resend watch request.
			if urlError, ok := err.(*url.Error); ok {
				if opError, ok := urlError.Err.(*net.OpError); ok {
					if errno, ok := opError.Err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED {
						time.Sleep(time.Second)
						continue
					}
				}
			}
			return
		}
		if err := r.watchHandler(w, &resourceVersion, resyncCh, stopCh); err != nil {
			if err != errorResyncRequested && err != errorStopRequested {
				glog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
			}
			return
		}
	}
}
Example 4: watchHandler
// watchHandler watches w and keeps *resourceVersion up to date.
func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, resyncCh <-chan time.Time, stopCh <-chan struct{}) error {
	start := time.Now()
	eventCount := 0
	// Stopping the watcher should be idempotent and if we return from this function there's no way
	// we're coming back in with the same watch interface.
	defer w.Stop()
loop:
	for {
		select {
		case <-stopCh:
			return errorStopRequested
		case <-resyncCh:
			return errorResyncRequested
		case event, ok := <-w.ResultChan():
			if !ok {
				break loop
			}
			if event.Type == watch.Error {
				return apierrs.FromObject(event.Object)
			}
			if e, a := r.expectedType, reflect.TypeOf(event.Object); e != a {
				util.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a))
				continue
			}
			meta, err := meta.Accessor(event.Object)
			if err != nil {
				util.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
				continue
			}
			switch event.Type {
			case watch.Added:
				r.store.Add(event.Object)
			case watch.Modified:
				r.store.Update(event.Object)
			case watch.Deleted:
				// TODO: Will any consumers need access to the "last known
				// state", which is passed in event.Object? If so, may need
				// to change this.
				r.store.Delete(event.Object)
			default:
				util.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
			}
			*resourceVersion = meta.ResourceVersion()
			r.setLastSyncResourceVersion(*resourceVersion)
			eventCount++
		}
	}
	watchDuration := time.Now().Sub(start)
	if watchDuration < 1*time.Second && eventCount == 0 {
		glog.V(4).Infof("%s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
		return errors.New("very short watch")
	}
	glog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
	return nil
}
Example 5: RunOnce
// RunOnce verifies the state of the portal IP allocations and returns an error if an unrecoverable problem occurs.
func (c *Repair) RunOnce() error {
	// TODO: (per smarterclayton) if Get() or ListServices() is a weak consistency read,
	// or if they are executed against different leaders,
	// the ordering guarantee required to ensure no IP is allocated twice is violated.
	// ListServices must return a ResourceVersion higher than the etcd index Get triggers,
	// and the release code must not release services that have had IPs allocated but not yet been created
	// See #8295
	latest, err := c.alloc.Get()
	if err != nil {
		return fmt.Errorf("unable to refresh the service IP block: %v", err)
	}
	ctx := api.WithNamespace(api.NewDefaultContext(), api.NamespaceAll)
	list, err := c.registry.ListServices(ctx)
	if err != nil {
		return fmt.Errorf("unable to refresh the service IP block: %v", err)
	}
	r := ipallocator.NewCIDRRange(c.network)
	for _, svc := range list.Items {
		if !api.IsServiceIPSet(&svc) {
			continue
		}
		ip := net.ParseIP(svc.Spec.PortalIP)
		if ip == nil {
			// portal IP is broken, reallocate
			util.HandleError(fmt.Errorf("the portal IP %s for service %s/%s is not a valid IP; please recreate", svc.Spec.PortalIP, svc.Name, svc.Namespace))
			continue
		}
		switch err := r.Allocate(ip); err {
		case nil:
		case ipallocator.ErrAllocated:
			// TODO: send event
			// portal IP is broken, reallocate
			util.HandleError(fmt.Errorf("the portal IP %s for service %s/%s was assigned to multiple services; please recreate", ip, svc.Name, svc.Namespace))
		case ipallocator.ErrNotInRange:
			// TODO: send event
			// portal IP is broken, reallocate
			util.HandleError(fmt.Errorf("the portal IP %s for service %s/%s is not within the service CIDR %s; please recreate", ip, svc.Name, svc.Namespace, c.network))
		case ipallocator.ErrFull:
			// TODO: send event
			return fmt.Errorf("the service CIDR %s is full; you must widen the CIDR in order to create new services", c.network)
		default:
			return fmt.Errorf("unable to allocate portal IP %s for service %s/%s due to an unknown error, exiting: %v", ip, svc.Name, svc.Namespace, err)
		}
	}
	err = r.Snapshot(latest)
	if err != nil {
		return fmt.Errorf("unable to persist the updated service IP allocations: %v", err)
	}
	if err := c.alloc.CreateOrUpdate(latest); err != nil {
		return fmt.Errorf("unable to persist the updated service IP allocations: %v", err)
	}
	return nil
}
Example 6: watchControllers
// resourceVersion is a pointer to the resource version to use/update.
func (rm *ReplicationManager) watchControllers(resourceVersion *string) {
	watching, err := rm.kubeClient.ReplicationControllers(api.NamespaceAll).Watch(
		labels.Everything(),
		fields.Everything(),
		*resourceVersion,
	)
	if err != nil {
		util.HandleError(fmt.Errorf("unable to watch: %v", err))
		time.Sleep(5 * time.Second)
		return
	}
	for {
		select {
		case <-rm.syncTime:
			rm.synchronize()
		case event, open := <-watching.ResultChan():
			if !open {
				// watchChannel has been closed, or something else went
				// wrong with our watch call. Let the util.Forever()
				// that called us call us again.
				return
			}
			if event.Type == watch.Error {
				util.HandleError(fmt.Errorf("error from watch during sync: %v", errors.FromObject(event.Object)))
				// Clear the resource version, this may cause us to skip some elements on the watch,
				// but we'll catch them on the synchronize() call, so it works out.
				*resourceVersion = ""
				continue
			}
			glog.V(4).Infof("Got watch: %#v", event)
			rc, ok := event.Object.(*api.ReplicationController)
			if !ok {
				if status, ok := event.Object.(*api.Status); ok {
					if status.Status == api.StatusFailure {
						glog.Errorf("failed to watch: %v", status)
						// Clear resource version here, as above, this won't hurt consistency, but we
						// should consider introspecting more carefully here. (or make the apiserver smarter)
						// "why not both?"
						*resourceVersion = ""
						continue
					}
				}
				util.HandleError(fmt.Errorf("unexpected object: %#v", event.Object))
				continue
			}
			// If we get disconnected, start where we left off.
			*resourceVersion = rc.ResourceVersion
			// Sync even if this is a deletion event, to ensure that we leave
			// it in the desired state.
			glog.V(4).Infof("About to sync from watch: %v", rc.Name)
			if err := rm.syncHandler(*rc); err != nil {
				util.HandleError(fmt.Errorf("unexpected sync error: %v", err))
			}
		}
	}
}
Example 7: RunOnce
// RunOnce verifies the state of the port allocations and returns an error if an unrecoverable problem occurs.
func (c *Repair) RunOnce() error {
	// TODO: (per smarterclayton) if Get() or ListServices() is a weak consistency read,
	// or if they are executed against different leaders,
	// the ordering guarantee required to ensure no port is allocated twice is violated.
	// ListServices must return a ResourceVersion higher than the etcd index Get triggers,
	// and the release code must not release services that have had ports allocated but not yet been created
	// See #8295
	latest, err := c.alloc.Get()
	if err != nil {
		return fmt.Errorf("unable to refresh the port block: %v", err)
	}
	ctx := api.WithNamespace(api.NewDefaultContext(), api.NamespaceAll)
	list, err := c.registry.ListServices(ctx)
	if err != nil {
		return fmt.Errorf("unable to refresh the port block: %v", err)
	}
	r := portallocator.NewPortAllocator(c.portRange)
	for i := range list.Items {
		svc := &list.Items[i]
		ports := service.CollectServiceNodePorts(svc)
		if len(ports) == 0 {
			continue
		}
		for _, port := range ports {
			switch err := r.Allocate(port); err {
			case nil:
			case portallocator.ErrAllocated:
				// TODO: send event
				// port is broken, reallocate
				util.HandleError(fmt.Errorf("the port %d for service %s/%s was assigned to multiple services; please recreate", port, svc.Name, svc.Namespace))
			case portallocator.ErrNotInRange:
				// TODO: send event
				// port is broken, reallocate
				util.HandleError(fmt.Errorf("the port %d for service %s/%s is not within the port range %v; please recreate", port, svc.Name, svc.Namespace, c.portRange))
			case portallocator.ErrFull:
				// TODO: send event
				return fmt.Errorf("the port range %v is full; you must widen the port range in order to create new services", c.portRange)
			default:
				return fmt.Errorf("unable to allocate port %d for service %s/%s due to an unknown error, exiting: %v", port, svc.Name, svc.Namespace, err)
			}
		}
	}
	err = r.Snapshot(latest)
	if err != nil {
		return fmt.Errorf("unable to persist the updated port allocations: %v", err)
	}
	if err := c.alloc.CreateOrUpdate(latest); err != nil {
		return fmt.Errorf("unable to persist the updated port allocations: %v", err)
	}
	return nil
}
Example 8: handleLocationChange
// handleLocationChange goes through all service account dockercfg secrets and updates them to point at a new docker-registry location
func (e *DockerRegistryServiceController) handleLocationChange(serviceLocation string) error {
	e.dockercfgController.SetDockerURL(serviceLocation)
	dockercfgSecrets, err := e.listDockercfgSecrets()
	if err != nil {
		return err
	}
	for _, dockercfgSecret := range dockercfgSecrets {
		dockercfg := &credentialprovider.DockerConfig{}
		if err := json.Unmarshal(dockercfgSecret.Data[api.DockerConfigKey], dockercfg); err != nil {
			util.HandleError(err)
			continue
		}
		dockercfgMap := map[string]credentialprovider.DockerConfigEntry(*dockercfg)
		keys := util.KeySet(reflect.ValueOf(dockercfgMap))
		if len(keys) != 1 {
			// err is nil at this point, so report a meaningful error instead
			util.HandleError(fmt.Errorf("expected exactly one key in dockercfg secret %s/%s, got %d", dockercfgSecret.Namespace, dockercfgSecret.Name, len(keys)))
			continue
		}
		oldKey := keys.List()[0]
		// if there's no change, skip
		if oldKey == serviceLocation {
			continue
		}
		dockercfgMap[serviceLocation] = dockercfgMap[oldKey]
		delete(dockercfgMap, oldKey)
		t := credentialprovider.DockerConfig(dockercfgMap)
		dockercfg = &t
		dockercfgContent, err := json.Marshal(dockercfg)
		if err != nil {
			util.HandleError(err)
			continue
		}
		dockercfgSecret.Data[api.DockerConfigKey] = dockercfgContent
		if _, err := e.client.Secrets(dockercfgSecret.Namespace).Update(dockercfgSecret); err != nil {
			util.HandleError(err)
			continue
		}
	}
	return err
}
Example 9: synchronizePolicyBindings
// synchronizePolicyBindings synchronizes access over each policy binding
func (ac *AuthorizationCache) synchronizePolicyBindings(userSubjectRecordStore cache.Store, groupSubjectRecordStore cache.Store, reviewRecordStore cache.Store) {
	policyBindingList, err := ac.policyClient.ReadOnlyPolicyBindings(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
	if err != nil {
		util.HandleError(err)
		return
	}
	for _, policyBinding := range policyBindingList.Items {
		reviewRequest := &reviewRequest{
			namespace:                         policyBinding.Namespace,
			policyBindingUIDToResourceVersion: map[types.UID]string{policyBinding.UID: policyBinding.ResourceVersion},
		}
		if err := ac.syncHandler(reviewRequest, userSubjectRecordStore, groupSubjectRecordStore, reviewRecordStore); err != nil {
			util.HandleError(fmt.Errorf("error synchronizing: %v", err))
		}
	}
}
Example 10: RoundTrip
// RoundTrip sends the request to the backend and strips off the CORS headers
// before returning the response.
func (p *UpgradeAwareSingleHostReverseProxy) RoundTrip(req *http.Request) (*http.Response, error) {
	resp, err := p.transport.RoundTrip(req)
	if err != nil {
		return resp, err
	}
	removeCORSHeaders(resp)
	removeChallengeHeaders(resp)
	if resp.StatusCode == http.StatusUnauthorized {
		util.HandleError(fmt.Errorf("got unauthorized error from backend for: %s %s", req.Method, req.URL))
		// Internal error, backend didn't recognize proxy identity
		// Surface as a server error to the client
		// TODO do we need to do more than this?
		resp = &http.Response{
			StatusCode:    http.StatusInternalServerError,
			Status:        http.StatusText(http.StatusInternalServerError),
			Body:          ioutil.NopCloser(strings.NewReader("Internal Server Error")),
			ContentLength: -1,
		}
	}
	// TODO do we need to strip off anything else?
	return resp, err
}
Example 11: Create
// Create creates an ImportController.
func (f *ImportControllerFactory) Create() controller.RunnableController {
	lw := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	q := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(lw, &api.ImageStream{}, q, 2*time.Minute).Run()
	c := &ImportController{
		client:   dockerregistry.NewClient(),
		streams:  f.Client,
		mappings: f.Client,
	}
	return &controller.RetryController{
		Queue: q,
		RetryManager: controller.NewQueueRetryManager(
			q,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				util.HandleError(err)
				return retries.Count < 5
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			r := obj.(*api.ImageStream)
			return c.Next(r)
		},
	}
}
Example 12: Create
// Create creates a new ImageChangeController which is used to trigger builds when a new
// image is available
func (factory *ImageChangeControllerFactory) Create() controller.RunnableController {
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&imageStreamLW{factory.Client}, &imageapi.ImageStream{}, queue, 2*time.Minute).Run()
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildConfigLW{client: factory.Client}, &buildapi.BuildConfig{}, store, 2*time.Minute).Run()
	imageChangeController := &buildcontroller.ImageChangeController{
		BuildConfigStore:        store,
		BuildConfigInstantiator: factory.BuildConfigInstantiator,
		Stop:                    factory.Stop,
	}
	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				if _, isFatal := err.(buildcontroller.ImageChangeControllerFatalError); isFatal {
					return false
				}
				return retries.Count < maxRetries
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			imageRepo := obj.(*imageapi.ImageStream)
			return imageChangeController.HandleImageRepo(imageRepo)
		},
	}
}
Example 13: Render
func (r confirmTemplateRenderer) Render(form ConfirmForm, w http.ResponseWriter, req *http.Request) {
	w.Header().Add("Content-Type", "text/html")
	w.WriteHeader(http.StatusOK)
	if err := confirmTemplate.Execute(w, form); err != nil {
		util.HandleError(fmt.Errorf("unable to render confirm template: %v", err))
	}
}
Example 14: RunKubernetesService
// RunKubernetesService periodically updates the kubernetes service
func (c *Controller) RunKubernetesService(ch chan struct{}) {
	util.Until(func() {
		if err := c.UpdateKubernetesService(); err != nil {
			util.HandleError(fmt.Errorf("unable to sync kubernetes service: %v", err))
		}
	}, c.EndpointInterval, ch)
}
Example 15: secretDeleted
// secretDeleted reacts to a Secret being deleted by looking to see if it's a dockercfg secret for a service account, in which case
// it removes the references from the service account and removes the token created to back the dockercfgSecret.
func (e *DockercfgDeletedController) secretDeleted(obj interface{}) {
	dockercfgSecret, ok := obj.(*api.Secret)
	if !ok {
		return
	}
	if _, exists := dockercfgSecret.Annotations[ServiceAccountTokenSecretNameKey]; !exists {
		return
	}
	for i := 1; i <= NumServiceAccountUpdateRetries; i++ {
		if err := e.removeDockercfgSecretReference(dockercfgSecret); err != nil {
			if kapierrors.IsConflict(err) && i < NumServiceAccountUpdateRetries {
				time.Sleep(wait.Jitter(100*time.Millisecond, 0.0))
				continue
			}
			glog.Error(err)
			break
		}
		break
	}
	// remove the reference token secret
	if err := e.client.Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Annotations[ServiceAccountTokenSecretNameKey]); (err != nil) && !kapierrors.IsNotFound(err) {
		util.HandleError(err)
	}
}