This article collects and summarizes typical usage examples of the Golang function k8s.io/kubernetes/pkg/util.HandleCrash. If you are wondering what exactly HandleCrash does, how to use it, or where to find usage examples, the curated code samples below may help.
A total of 15 HandleCrash code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code samples.
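Before the examples, here is a minimal, self-contained sketch (not taken from the examples below) of the basic pattern they all share: defer util.HandleCrash() at the top of a goroutine so a panic is recovered and logged instead of crashing the whole process. It assumes the default behaviour of HandleCrash, which logs the panic without re-raising it; Example 4 below shows the variant that passes additional handlers to HandleCrash.

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util"
)

func main() {
	go func() {
		// Recover and log any panic raised in this goroutine.
		defer util.HandleCrash()
		panic("boom in a background worker")
	}()
	time.Sleep(time.Second) // give the goroutine time to panic and recover
	fmt.Println("main is still running")
}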
Example 1: Run
// Runs e; will not return until stopCh is closed. workers determines how many
// endpoints will be handled in parallel.
func (e *EndpointController) Run(workers int, stopCh <-chan struct{}) {
	defer util.HandleCrash()
	go e.serviceController.Run(stopCh)
	go e.podController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go util.Until(e.worker, time.Second, stopCh)
	}
	go func() {
		defer util.HandleCrash()
		time.Sleep(5 * time.Minute) // give time for our cache to fill
		e.checkLeftoverEndpoints()
	}()
	<-stopCh
	e.queue.ShutDown()
}
Example 2: Copy
// Copy the reader to the response. The created WebSocket is closed after this
// method completes.
func (r *Reader) Copy(w http.ResponseWriter, req *http.Request) error {
	go func() {
		defer util.HandleCrash()
		websocket.Server{Handshake: r.handshake, Handler: r.handle}.ServeHTTP(w, req)
	}()
	return <-r.err
}
Example 3: ObserveResourceVersion
func (c *clusterResourceVersionObserver) ObserveResourceVersion(resourceVersion string, timeout time.Duration) error {
	if len(c.watchers) == 0 {
		return nil
	}
	wg := &sync.WaitGroup{}
	backendErrors := make([]error, len(c.watchers), len(c.watchers))
	for i, watcher := range c.watchers {
		wg.Add(1)
		go func(i int, watcher rest.Watcher) {
			defer kutil.HandleCrash()
			defer wg.Done()
			backendErrors[i] = watchForResourceVersion(c.versioner, watcher, resourceVersion, timeout)
		}(i, watcher)
	}
	glog.V(5).Infof("waiting for resourceVersion %s to be distributed", resourceVersion)
	wg.Wait()
	successes := 0
	for _, err := range backendErrors {
		if err == nil {
			successes++
		} else {
			glog.V(4).Infof("error verifying resourceVersion %s: %v", resourceVersion, err)
		}
	}
	glog.V(5).Infof("resourceVersion %s was distributed to %d etcd cluster members (out of %d)", resourceVersion, successes, len(c.watchers))
	if successes >= c.successThreshold {
		return nil
	}
	return fmt.Errorf("resourceVersion %s was observed on %d cluster members (threshold %d): %v", resourceVersion, successes, c.successThreshold, backendErrors)
}
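Example 3 fans out one guarded goroutine per watcher and collects per-backend errors by index, so no mutex is needed around the error slice. The reduced sketch below shows just that fan-out shape; backends and check are hypothetical stand-ins for c.watchers and watchForResourceVersion, and util refers to the same k8s.io/kubernetes/pkg/util package that is aliased as kutil above.

// Assumes: import ("sync"; "k8s.io/kubernetes/pkg/util")
func checkAll(backends []string, check func(string) error) []error {
	errs := make([]error, len(backends))
	wg := &sync.WaitGroup{}
	for i, backend := range backends {
		wg.Add(1)
		go func(i int, backend string) {
			defer util.HandleCrash() // a panic in check must not take down the process
			defer wg.Done()
			errs[i] = check(backend) // each goroutine writes only its own slot
		}(i, backend)
	}
	wg.Wait()
	return errs
}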
Example 4: finishRequest
// finishRequest makes a given resultFunc asynchronous and handles errors returned by the response.
// Any api.Status object returned is considered an "error", which interrupts the normal response flow.
func finishRequest(timeout time.Duration, fn resultFunc) (result runtime.Object, err error) {
	// these channels need to be buffered to prevent the goroutine below from hanging indefinitely
	// when the select statement reads something other than the one the goroutine sends on.
	ch := make(chan runtime.Object, 1)
	errCh := make(chan error, 1)
	panicCh := make(chan interface{}, 1)
	go func() {
		// panics don't cross goroutine boundaries, so we have to handle ourselves
		defer util.HandleCrash(func(panicReason interface{}) {
			// Propagate to parent goroutine
			panicCh <- panicReason
		})
		if result, err := fn(); err != nil {
			errCh <- err
		} else {
			ch <- result
		}
	}()
	select {
	case result = <-ch:
		if status, ok := result.(*unversioned.Status); ok {
			return nil, errors.FromObject(status)
		}
		return result, nil
	case err = <-errCh:
		return nil, err
	case p := <-panicCh:
		panic(p)
	case <-time.After(timeout):
		return nil, errors.NewTimeoutError("request did not complete within allowed duration", 0)
	}
}
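The interesting detail in Example 4 is the optional handler argument to HandleCrash: because a panic does not cross goroutine boundaries, the worker forwards the panic reason over a buffered channel and the caller re-panics with it. The stripped-down sketch below isolates that pattern; runGuarded and doWork are hypothetical names, not part of the original code.

// Assumes: import "k8s.io/kubernetes/pkg/util"
func runGuarded(doWork func() (string, error)) (string, error) {
	resultCh := make(chan string, 1)
	errCh := make(chan error, 1)
	panicCh := make(chan interface{}, 1)
	go func() {
		defer util.HandleCrash(func(panicReason interface{}) {
			panicCh <- panicReason // hand the panic reason back to the caller
		})
		if result, err := doWork(); err != nil {
			errCh <- err
		} else {
			resultCh <- result
		}
	}()
	select {
	case result := <-resultCh:
		return result, nil
	case err := <-errCh:
		return "", err
	case p := <-panicCh:
		panic(p) // re-raise the worker's panic in the calling goroutine
	}
}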
Example 5: buildImage
// buildImage invokes a docker build on a particular directory
func buildImage(client DockerClient, dir string, noCache bool, tag string, tar tar.Tar, pullAuth *docker.AuthConfigurations, forcePull bool) error {
	// TODO: be able to pass a stream directly to the Docker build to avoid the double temp hit
	r, w := io.Pipe()
	go func() {
		defer util.HandleCrash()
		defer w.Close()
		if err := tar.CreateTarStream(dir, false, w); err != nil {
			w.CloseWithError(err)
		}
	}()
	defer w.Close()
	glog.V(5).Infof("Invoking Docker build to create %q", tag)
	opts := docker.BuildImageOptions{
		Name: tag,
		RmTmpContainer: true,
		OutputStream: os.Stdout,
		InputStream: r,
		NoCache: noCache,
		Pull: forcePull,
	}
	if pullAuth != nil {
		opts.AuthConfigs = *pullAuth
	}
	return client.BuildImage(opts)
}
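Examples 5 and 7 stream the tar archive to the Docker daemon through an io.Pipe, with the producer goroutine guarded by HandleCrash so that a panic while writing cannot kill the process. A generic sketch of that producer/consumer shape follows; pipeFrom and produce are hypothetical names standing in for the tar-writing closure.

// Assumes: import ("io"; "k8s.io/kubernetes/pkg/util")
func pipeFrom(produce func(w io.Writer) error) io.Reader {
	r, w := io.Pipe()
	go func() {
		defer util.HandleCrash() // recover a panic inside produce
		defer w.Close()
		if err := produce(w); err != nil {
			// Surface the error to whoever is reading from the pipe.
			w.CloseWithError(err)
		}
	}()
	return r
}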
Example 6: getBackendConn
func (udp *udpProxySocket) getBackendConn(activeClients *clientCache, cliAddr net.Addr, proxier *Proxier, service proxy.ServicePortName, timeout time.Duration) (net.Conn, error) {
	activeClients.mu.Lock()
	defer activeClients.mu.Unlock()
	svrConn, found := activeClients.clients[cliAddr.String()]
	if !found {
		// TODO: This could spin up a new goroutine to make the outbound connection,
		// and keep accepting inbound traffic.
		glog.V(3).Infof("New UDP connection from %s", cliAddr)
		var err error
		svrConn, err = tryConnect(service, cliAddr, "udp", proxier)
		if err != nil {
			return nil, err
		}
		if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil {
			glog.Errorf("SetDeadline failed: %v", err)
			return nil, err
		}
		activeClients.clients[cliAddr.String()] = svrConn
		go func(cliAddr net.Addr, svrConn net.Conn, activeClients *clientCache, timeout time.Duration) {
			defer util.HandleCrash()
			udp.proxyClient(cliAddr, svrConn, activeClients, timeout)
		}(cliAddr, svrConn, activeClients, timeout)
	}
	return svrConn, nil
}
Example 7: buildImage
// buildImage invokes a docker build on a particular directory
func buildImage(client DockerClient, dir string, dockerfilePath string, noCache bool, tag string, tar tar.Tar, pullAuth *docker.AuthConfigurations, forcePull bool, cgLimits *s2iapi.CGroupLimits) error {
	// TODO: be able to pass a stream directly to the Docker build to avoid the double temp hit
	r, w := io.Pipe()
	go func() {
		defer util.HandleCrash()
		defer w.Close()
		if err := tar.CreateTarStream(dir, false, w); err != nil {
			w.CloseWithError(err)
		}
	}()
	defer w.Close()
	glog.V(5).Infof("Invoking Docker build to create %q", tag)
	opts := docker.BuildImageOptions{
		Name: tag,
		RmTmpContainer: true,
		OutputStream: os.Stdout,
		InputStream: r,
		Dockerfile: dockerfilePath,
		NoCache: noCache,
		Pull: forcePull,
	}
	if cgLimits != nil {
		opts.Memory = cgLimits.MemoryLimitBytes
		opts.Memswap = cgLimits.MemorySwap
		opts.CPUShares = cgLimits.CPUShares
		opts.CPUPeriod = cgLimits.CPUPeriod
		opts.CPUQuota = cgLimits.CPUQuota
	}
	if pullAuth != nil {
		opts.AuthConfigs = *pullAuth
	}
	return client.BuildImage(opts)
}
Example 8: etcdWatch
// etcdWatch calls etcd's Watch function, and handles any errors. Meant to be called
// as a goroutine.
func (w *etcdWatcher) etcdWatch(ctx context.Context, client etcd.KeysAPI, key string, resourceVersion uint64) {
	defer util.HandleCrash()
	defer close(w.etcdError)
	defer close(w.etcdIncoming)
	if resourceVersion == 0 {
		latest, err := etcdGetInitialWatchState(ctx, client, key, w.list, w.etcdIncoming)
		if err != nil {
			w.etcdError <- err
			return
		}
		resourceVersion = latest
	}
	opts := etcd.WatcherOptions{
		Recursive: w.list,
		AfterIndex: resourceVersion,
	}
	watcher := client.Watcher(key, &opts)
	w.stopLock.Lock()
	w.ctx, w.cancel = context.WithCancel(ctx)
	w.stopLock.Unlock()
	for {
		resp, err := watcher.Next(w.ctx)
		if err != nil {
			w.etcdError <- err
			return
		}
		w.etcdIncoming <- resp
	}
}
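Examples 8 and 9 rely on the LIFO order of deferred calls: HandleCrash is deferred first, so it runs last and can still recover a panic after the channel-closing defers have executed. The tiny sketch below illustrates just that ordering; it is not part of the etcd watcher code.

// Assumes: import "k8s.io/kubernetes/pkg/util"
func watchSketch() <-chan string {
	results := make(chan string, 1)
	go func() {
		defer util.HandleCrash() // runs last: recovers the panic below
		defer close(results)     // runs first while the goroutine unwinds
		results <- "event"
		panic("decoder blew up") // logged by HandleCrash; the process keeps running
	}()
	return results
}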
Example 9: etcdWatch
// etcdWatch calls etcd's Watch function, and handles any errors. Meant to be called
// as a goroutine.
func (w *etcdWatcher) etcdWatch(client tools.EtcdClient, key string, resourceVersion uint64) {
	// glog.Infof("Watching")
	defer util.HandleCrash()
	defer close(w.etcdError)
	if resourceVersion == 0 {
		latest, err := etcdGetInitialWatchState(client, key, w.list, w.etcdIncoming)
		if err != nil {
			if etcdError, ok := err.(*etcd.EtcdError); ok && etcdError != nil && etcdError.ErrorCode == tools.EtcdErrorCodeNotFound {
				// glog.Errorf("Error getting initial watch, key not found: %v", err)
				return
			}
			glog.Errorf("Error getting initial watch: %v", err)
			w.etcdError <- err
			return
		}
		resourceVersion = latest + 1
	}
	response, err := client.Watch(key, resourceVersion, w.list, w.etcdIncoming, w.etcdStop)
	glog.Infof("response is %v", response)
	if err != nil && err != etcd.ErrWatchStoppedByUser {
		glog.Errorf("Error watch: %v", err)
		w.etcdError <- err
	}
}
Example 10: makeDefaultErrorFunc
func (factory *ConfigFactory) makeDefaultErrorFunc(backoff *podBackoff, podQueue *cache.FIFO) func(pod *api.Pod, err error) {
	return func(pod *api.Pod, err error) {
		if err == scheduler.ErrNoNodesAvailable {
			glog.V(4).Infof("Unable to schedule %v %v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name)
		} else {
			glog.Errorf("Error scheduling %v %v: %v; retrying", pod.Namespace, pod.Name, err)
		}
		backoff.gc()
		// Retry asynchronously.
		// Note that this is extremely rudimentary and we need a more real error handling path.
		go func() {
			defer util.HandleCrash()
			podID := pod.Name
			podNamespace := pod.Namespace
			backoff.wait(podID)
			// Get the pod again; it may have changed/been scheduled already.
			pod = &api.Pod{}
			err := factory.Client.Get().Namespace(podNamespace).Resource("pods").Name(podID).Do().Into(pod)
			if err != nil {
				if !errors.IsNotFound(err) {
					glog.Errorf("Error getting pod %v for retry: %v; abandoning", podID, err)
				}
				return
			}
			if pod.Spec.NodeName == "" {
				podQueue.Add(pod)
			}
		}()
	}
}
Example 11: addServiceOnPort
// addServiceOnPort starts listening for a new service, returning the serviceInfo.
// Pass proxyPort=0 to allocate a random port. The timeout only applies to UDP
// connections, for now.
func (proxier *Proxier) addServiceOnPort(service proxy.ServicePortName, protocol api.Protocol, proxyPort int, timeout time.Duration) (*serviceInfo, error) {
	sock, err := newProxySocket(protocol, proxier.listenIP, proxyPort)
	if err != nil {
		return nil, err
	}
	_, portStr, err := net.SplitHostPort(sock.Addr().String())
	if err != nil {
		sock.Close()
		return nil, err
	}
	portNum, err := strconv.Atoi(portStr)
	if err != nil {
		sock.Close()
		return nil, err
	}
	si := &serviceInfo{
		proxyPort: portNum,
		protocol: protocol,
		socket: sock,
		timeout: timeout,
		sessionAffinityType: api.ServiceAffinityNone, // default
		stickyMaxAgeMinutes: 180, // TODO: parameterize this in the API.
	}
	proxier.setServiceInfo(service, si)
	glog.V(2).Infof("Proxying for service %q on %s port %d", service, protocol, portNum)
	go func(service proxy.ServicePortName, proxier *Proxier) {
		defer util.HandleCrash()
		atomic.AddInt32(&proxier.numProxyLoops, 1)
		sock.ProxyLoop(service, si, proxier)
		atomic.AddInt32(&proxier.numProxyLoops, -1)
	}(service, proxier)
	return si, nil
}
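Example 11 wraps the long-running proxy loop in a guarded goroutine and tracks how many loops are active with an atomic counter. The sketch below shows the same bookkeeping in isolation, with one small variation: the decrement is deferred so it also runs if the loop panics. startLoop, loop, and activeLoops are hypothetical names.

// Assumes: import ("sync/atomic"; "k8s.io/kubernetes/pkg/util")
var activeLoops int32

func startLoop(loop func()) {
	go func() {
		defer util.HandleCrash()
		atomic.AddInt32(&activeLoops, 1)
		defer atomic.AddInt32(&activeLoops, -1)
		loop()
	}()
}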
Example 12: handleSchedulingError
// implementation of scheduling plugin's Error func; see plugin/pkg/scheduler
func (k *errorHandler) handleSchedulingError(pod *api.Pod, schedulingErr error) {
	if schedulingErr == noSuchPodErr {
		log.V(2).Infof("Not rescheduling non-existent pod %v", pod.Name)
		return
	}
	log.Infof("Error scheduling %v: %v; retrying", pod.Name, schedulingErr)
	defer util.HandleCrash()
	// default upstream scheduler passes pod.Name as binding.PodID
	ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
	podKey, err := podtask.MakePodKey(ctx, pod.Name)
	if err != nil {
		log.Errorf("Failed to construct pod key, aborting scheduling for pod %v: %v", pod.Name, err)
		return
	}
	k.backoff.GC()
	k.api.Lock()
	defer k.api.Unlock()
	switch task, state := k.api.tasks().ForPod(podKey); state {
	case podtask.StateUnknown:
		// if we don't have a mapping here any more then someone deleted the pod
		log.V(2).Infof("Could not resolve pod to task, aborting pod reschedule: %s", podKey)
		return
	case podtask.StatePending:
		if task.Has(podtask.Launched) {
			log.V(2).Infof("Skipping re-scheduling for already-launched pod %v", podKey)
			return
		}
		breakoutEarly := queue.BreakChan(nil)
		if schedulingErr == noSuitableOffersErr {
			log.V(3).Infof("adding backoff breakout handler for pod %v", podKey)
			breakoutEarly = queue.BreakChan(k.api.offers().Listen(podKey, func(offer *mesos.Offer) bool {
				k.api.Lock()
				defer k.api.Unlock()
				switch task, state := k.api.tasks().Get(task.ID); state {
				case podtask.StatePending:
					// Assess fitness of pod with the current offer. The scheduler normally
					// "backs off" when it can't find an offer that matches up with a pod.
					// The backoff period for a pod can terminate sooner if an offer becomes
					// available that matches up.
					return !task.Has(podtask.Launched) && k.api.algorithm().FitPredicate()(task, offer, nil)
				default:
					// no point in continuing to check for matching offers
					return true
				}
			}))
		}
		delay := k.backoff.Get(podKey)
		log.V(3).Infof("requeuing pod %v with delay %v", podKey, delay)
		k.qr.requeue(&Pod{Pod: pod, delay: &delay, notify: breakoutEarly})
	default:
		log.V(2).Infof("Task is no longer pending, aborting reschedule for pod %v", podKey)
	}
}
Example 13: exportHTTP
func (cc *cadvisorClient) exportHTTP(port uint) error {
	// Register the handlers regardless as this registers the prometheus
	// collector properly.
	mux := http.NewServeMux()
	err := cadvisorHttp.RegisterHandlers(mux, cc, "", "", "", "", "/metrics")
	if err != nil {
		return err
	}
	// Only start the http server if port > 0
	if port > 0 {
		serv := &http.Server{
			Addr: fmt.Sprintf(":%d", port),
			Handler: mux,
		}
		// TODO(vmarmol): Remove this when the cAdvisor port is once again free.
		// If export failed, retry in the background until we are able to bind.
		// This allows an existing cAdvisor to be killed before this one registers.
		go func() {
			defer util.HandleCrash()
			err := serv.ListenAndServe()
			for err != nil {
				glog.Infof("Failed to register cAdvisor on port %d, retrying. Error: %v", port, err)
				time.Sleep(time.Minute)
				err = serv.ListenAndServe()
			}
		}()
	}
	return nil
}
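Example 13 keeps retrying ListenAndServe inside a HandleCrash-protected goroutine so an already-bound port does not permanently disable the cAdvisor endpoint. The generic sketch below captures that retry shape; retryInBackground and attempt are hypothetical names standing in for the serv.ListenAndServe loop.

// Assumes: import ("time"; "github.com/golang/glog"; "k8s.io/kubernetes/pkg/util")
func retryInBackground(attempt func() error, interval time.Duration) {
	go func() {
		defer util.HandleCrash()
		for err := attempt(); err != nil; err = attempt() {
			glog.Infof("attempt failed, retrying in %v: %v", interval, err)
			time.Sleep(interval)
		}
	}()
}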
Example 14: receive
// receive reads result from the decoder in a loop and sends down the result channel.
func (sw *StreamWatcher) receive() {
	defer close(sw.result)
	defer sw.Stop()
	defer util.HandleCrash()
	for {
		action, obj, err := sw.source.Decode()
		if err != nil {
			// Ignore expected error.
			if sw.stopping() {
				return
			}
			switch err {
			case io.EOF:
				// watch closed normally
			case io.ErrUnexpectedEOF:
				glog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err)
			default:
				msg := "Unable to decode an event from the watch stream: %v"
				if net.IsProbableEOF(err) {
					glog.V(5).Infof(msg, err)
				} else {
					glog.Errorf(msg, err)
				}
			}
			return
		}
		sw.result <- Event{
			Type: action,
			Object: obj,
		}
	}
}
Example 15: translate
// translate pulls stuff from etcd, converts, and pushes out the outgoing channel. Meant to be
// called as a goroutine.
func (w *etcdWatcher) translate() {
	defer close(w.outgoing)
	defer util.HandleCrash()
	for {
		select {
		case err := <-w.etcdError:
			if err != nil {
				w.emit(watch.Event{
					Type: watch.Error,
					Object: &unversioned.Status{
						Status: unversioned.StatusFailure,
						Message: err.Error(),
					},
				})
			}
			return
		case <-w.userStop:
			w.etcdStop <- true
			return
		case res, ok := <-w.etcdIncoming:
			if ok {
				if curLen := int64(len(w.etcdIncoming)); watchChannelHWM.Check(curLen) {
					// Monitor if this gets backed up, and how much.
					glog.V(2).Infof("watch: %v objects queued in channel.", curLen)
				}
				w.sendResult(res)
			}
			// If !ok, don't return here-- must wait for etcdError channel
			// to give an error or be closed.
		}
	}
}