This article collects typical usage examples of the Golang HandleCrash function from k8s.io/kubernetes/pkg/util/runtime. If you are wondering what HandleCrash does and how to use it, the curated examples below should help.
The following 15 code examples of HandleCrash are shown, ordered by popularity.
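Before the examples, here is a minimal sketch of the idiom they all share; doWork is a hypothetical placeholder. HandleCrash is deferred at the top of a goroutine so that a panic is recovered and handed to the package's registered panic handlers, which log it; whether the panic is then re-raised depends on the package's ReallyCrash setting.

// Minimal sketch of the HandleCrash idiom; doWork is hypothetical.
go func() {
	defer utilruntime.HandleCrash()
	doWork() // a panic here is recovered and logged by the panic handlers
}()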
Example 1: Run
// Runs e; will not return until stopCh is closed. workers determines how many
// endpoints will be handled in parallel.
func (e *EndpointController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer e.queue.ShutDown()
	go e.serviceController.Run(stopCh)
	go e.podController.Run(stopCh)
	if !cache.WaitForCacheSync(stopCh, e.podStoreSynced) {
		return
	}
	for i := 0; i < workers; i++ {
		go wait.Until(e.worker, time.Second, stopCh)
	}
	go func() {
		defer utilruntime.HandleCrash()
		time.Sleep(5 * time.Minute) // give time for our cache to fill
		e.checkLeftoverEndpoints()
	}()
	if e.internalPodInformer != nil {
		go e.internalPodInformer.Run(stopCh)
	}
	<-stopCh
}
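The worker passed to wait.Until above is not shown in this example (the same pattern appears in Examples 2 and 14). As a rough, hypothetical sketch, such a worker typically drains keys from the controller's work queue until the queue shuts down; e.queue being a workqueue, the string key type, and syncService are assumptions, not the actual implementation:

// Hypothetical worker loop; e.queue is assumed to be a workqueue and
// e.syncService an idempotent sync function keyed by namespace/name.
func (e *EndpointController) worker() {
	for {
		key, quit := e.queue.Get()
		if quit {
			return
		}
		func() {
			// Done tells the queue this key is finished so it can be re-added.
			defer e.queue.Done(key)
			e.syncService(key.(string))
		}()
	}
}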
Example 2: Run
// Runs e; will not return until stopCh is closed. workers determines how many
// endpoints will be handled in parallel.
func (e *endpointController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	go e.serviceController.Run(stopCh)
	go e.podController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go util.Until(e.worker, time.Second, stopCh)
	}
	go func() {
		defer utilruntime.HandleCrash()
		time.Sleep(5 * time.Minute) // give time for our cache to fill
		e.checkLeftoverEndpoints()
	}()
	<-stopCh
	e.queue.ShutDown()
}
Example 3: ObserveResourceVersion
func (c *clusterResourceVersionObserver) ObserveResourceVersion(resourceVersion string, timeout time.Duration) error {
	if len(c.watchers) == 0 {
		return nil
	}
	wg := &sync.WaitGroup{}
	backendErrors := make([]error, len(c.watchers), len(c.watchers))
	for i, watcher := range c.watchers {
		wg.Add(1)
		go func(i int, watcher rest.Watcher) {
			defer utilruntime.HandleCrash()
			defer wg.Done()
			backendErrors[i] = watchForResourceVersion(c.versioner, watcher, resourceVersion, timeout)
		}(i, watcher)
	}
	glog.V(5).Infof("waiting for resourceVersion %s to be distributed", resourceVersion)
	wg.Wait()
	successes := 0
	for _, err := range backendErrors {
		if err == nil {
			successes++
		} else {
			glog.V(4).Infof("error verifying resourceVersion %s: %v", resourceVersion, err)
		}
	}
	glog.V(5).Infof("resourceVersion %s was distributed to %d etcd cluster members (out of %d)", resourceVersion, successes, len(c.watchers))
	if successes >= c.successThreshold {
		return nil
	}
	return fmt.Errorf("resourceVersion %s was observed on %d cluster members (threshold %d): %v", resourceVersion, successes, c.successThreshold, backendErrors)
}
Example 4: exportHTTP
func (cc *cadvisorClient) exportHTTP(port uint) error {
	// Register the handlers regardless as this registers the prometheus
	// collector properly.
	mux := http.NewServeMux()
	err := cadvisorhttp.RegisterHandlers(mux, cc, "", "", "", "")
	if err != nil {
		return err
	}
	cadvisorhttp.RegisterPrometheusHandler(mux, cc, "/metrics", containerLabels)
	// Only start the http server if port > 0
	if port > 0 {
		serv := &http.Server{
			Addr:    fmt.Sprintf(":%d", port),
			Handler: mux,
		}
		// TODO(vmarmol): Remove this when the cAdvisor port is once again free.
		// If export failed, retry in the background until we are able to bind.
		// This allows an existing cAdvisor to be killed before this one registers.
		go func() {
			defer runtime.HandleCrash()
			err := serv.ListenAndServe()
			for err != nil {
				glog.Infof("Failed to register cAdvisor on port %d, retrying. Error: %v", port, err)
				time.Sleep(time.Minute)
				err = serv.ListenAndServe()
			}
		}()
	}
	return nil
}
Example 5: Run
func (a *HorizontalController) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	glog.Infof("Starting HPA Controller")
	go a.controller.Run(stopCh)
	<-stopCh
	glog.Infof("Shutting down HPA Controller")
}
Example 6: pop
func (p *processorListener) pop(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	for {
		blockingGet := func() (interface{}, bool) {
			p.lock.Lock()
			defer p.lock.Unlock()
			for len(p.pendingNotifications) == 0 {
				// check if we're shutdown
				select {
				case <-stopCh:
					return nil, true
				default:
				}
				p.cond.Wait()
			}
			nt := p.pendingNotifications[0]
			p.pendingNotifications = p.pendingNotifications[1:]
			return nt, false
		}
		notification, stopped := blockingGet()
		if stopped {
			return
		}
		select {
		case <-stopCh:
			return
		case p.nextCh <- notification:
		}
	}
}
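pop blocks in p.cond.Wait() whenever pendingNotifications is empty, so the producer side must append under the same lock and then wake the waiter. A hypothetical sketch of that counterpart (the name add and its exact shape are assumptions):

// Hypothetical producer counterpart to pop: queue a notification under
// p.lock, then wake any goroutine blocked in p.cond.Wait().
func (p *processorListener) add(notification interface{}) {
	p.lock.Lock()
	defer p.lock.Unlock()
	p.pendingNotifications = append(p.pendingNotifications, notification)
	p.cond.Broadcast()
}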
Example 7: receive
// receive reads result from the decoder in a loop and sends down the result channel.
func (sw *StreamWatcher) receive() {
	defer close(sw.result)
	defer sw.Stop()
	defer utilruntime.HandleCrash()
	for {
		action, obj, err := sw.source.Decode()
		if err != nil {
			// Ignore expected error.
			if sw.stopping() {
				return
			}
			switch err {
			case io.EOF:
				// watch closed normally
			case io.ErrUnexpectedEOF:
				glog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err)
			default:
				msg := "Unable to decode an event from the watch stream: %v"
				if net.IsProbableEOF(err) {
					glog.V(5).Infof(msg, err)
				} else {
					glog.Errorf(msg, err)
				}
			}
			return
		}
		sw.result <- Event{
			Type:   action,
			Object: obj,
		}
	}
}
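The events that receive sends down sw.result are consumed through the watch.Interface result channel. A minimal, hypothetical consumer could look like this (w is any watch.Interface, for example a StreamWatcher):

// Hypothetical consumer of the events produced by receive.
for event := range w.ResultChan() {
	switch event.Type {
	case watch.Added, watch.Modified, watch.Deleted:
		fmt.Printf("%s: %v\n", event.Type, event.Object)
	case watch.Error:
		return // the server reported an error; stop consuming
	}
}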
Example 8: buildImage
// buildImage invokes a docker build on a particular directory
func buildImage(client DockerClient, dir string, dockerfilePath string, noCache bool, tag string, tar tar.Tar, pullAuth *docker.AuthConfigurations, forcePull bool, cgLimits *s2iapi.CGroupLimits) error {
	// TODO: be able to pass a stream directly to the Docker build to avoid the double temp hit
	r, w := io.Pipe()
	go func() {
		defer utilruntime.HandleCrash()
		defer w.Close()
		if err := tar.CreateTarStream(dir, false, w); err != nil {
			w.CloseWithError(err)
		}
	}()
	defer w.Close()
	glog.V(5).Infof("Invoking Docker build to create %q", tag)
	opts := docker.BuildImageOptions{
		Name:           tag,
		RmTmpContainer: true,
		OutputStream:   os.Stdout,
		InputStream:    r,
		Dockerfile:     dockerfilePath,
		NoCache:        noCache,
		Pull:           forcePull,
	}
	if cgLimits != nil {
		opts.Memory = cgLimits.MemoryLimitBytes
		opts.Memswap = cgLimits.MemorySwap
		opts.CPUShares = cgLimits.CPUShares
		opts.CPUPeriod = cgLimits.CPUPeriod
		opts.CPUQuota = cgLimits.CPUQuota
	}
	if pullAuth != nil {
		opts.AuthConfigs = *pullAuth
	}
	return client.BuildImage(opts)
}
Example 9: handle
// handle implements a WebSocket handler.
func (r *Reader) handle(ws *websocket.Conn) {
	// Close the connection when the client requests it, or when we finish streaming, whichever happens first
	closeConnOnce := &sync.Once{}
	closeConn := func() {
		closeConnOnce.Do(func() {
			ws.Close()
		})
	}
	negotiated := ws.Config().Protocol
	r.selectedProtocol = negotiated[0]
	defer close(r.err)
	defer closeConn()
	go func() {
		defer runtime.HandleCrash()
		// This blocks until the connection is closed.
		// Client should not send anything.
		IgnoreReceives(ws, r.timeout)
		// Once the client closes, we should also close
		closeConn()
	}()
	r.err <- messageCopy(ws, r.r, !r.protocols[r.selectedProtocol].Binary, r.ping, r.timeout)
}
Example 10: Run
// Run begins watching and syncing.
func (ic *IngressIPController) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	go ic.controller.Run(stopCh)
	glog.V(5).Infof("Waiting for the initial sync to be completed")
	for !ic.controller.HasSynced() {
		select {
		case <-time.After(SyncProcessedPollPeriod):
		case <-stopCh:
			return
		}
	}
	if !ic.processInitialSync() {
		return
	}
	glog.V(5).Infof("Starting normal worker")
	for {
		if !ic.work() {
			break
		}
	}
	glog.V(5).Infof("Shutting down ingress ip controller")
	ic.queue.ShutDown()
}
Example 11: gather
func (w *resourceGatherWorker) gather(initialSleep time.Duration) {
	defer utilruntime.HandleCrash()
	defer w.wg.Done()
	defer Logf("Closing worker for %v", w.nodeName)
	select {
	case <-time.After(initialSleep):
		// TODO: remove after #21313 is fixed
		Logf("Probing %v", w.nodeName)
		w.singleProbe()
		// TODO: remove after #21313 is fixed
		Logf("Finished probe for %v", w.nodeName)
		for {
			select {
			case <-time.After(resourceDataGatheringPeriod):
				// TODO: remove after #21313 is fixed
				Logf("Probing %v", w.nodeName)
				w.singleProbe()
				// TODO: remove after #21313 is fixed
				Logf("Finished probe for %v", w.nodeName)
			case <-w.stopCh:
				return
			}
		}
	case <-w.stopCh:
		return
	}
}
Example 12: TestAdmitExceedQuotaLimit
// TestAdmitExceedQuotaLimit verifies that a pod that exceeds allowed usage is rejected during admission.
func TestAdmitExceedQuotaLimit(t *testing.T) {
	resourceQuota := &api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("3"),
				api.ResourceMemory: resource.MustParse("100Gi"),
				api.ResourcePods:   resource.MustParse("5"),
			},
			Used: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("1"),
				api.ResourceMemory: resource.MustParse("50Gi"),
				api.ResourcePods:   resource.MustParse("3"),
			},
		},
	}
	kubeClient := fake.NewSimpleClientset(resourceQuota)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	evaluator, _ := newQuotaEvaluator(kubeClient, install.NewRegistry(kubeClient))
	evaluator.indexer = indexer
	stopCh := make(chan struct{})
	defer close(stopCh)
	defer utilruntime.HandleCrash()
	go evaluator.Run(5, stopCh)
	handler := &quotaAdmission{
		Handler:   admission.NewHandler(admission.Create, admission.Update),
		evaluator: evaluator,
	}
	indexer.Add(resourceQuota)
	newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")))
	err := handler.Admit(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
	if err == nil {
		t.Errorf("Expected an error exceeding quota")
	}
}
Example 13: TestAdmissionIgnoresSubresources
// TestAdmissionIgnoresSubresources verifies that the admission controller ignores subresources.
// It verifies that creation of a pod that would exceed quota is rejected,
// and that create operations against a subresource that would exceed quota succeed.
func TestAdmissionIgnoresSubresources(t *testing.T) {
	resourceQuota := &api.ResourceQuota{}
	resourceQuota.Name = "quota"
	resourceQuota.Namespace = "test"
	resourceQuota.Status = api.ResourceQuotaStatus{
		Hard: api.ResourceList{},
		Used: api.ResourceList{},
	}
	resourceQuota.Status.Hard[api.ResourceMemory] = resource.MustParse("2Gi")
	resourceQuota.Status.Used[api.ResourceMemory] = resource.MustParse("1Gi")
	kubeClient := fake.NewSimpleClientset(resourceQuota)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	evaluator, _ := newQuotaEvaluator(kubeClient, install.NewRegistry(kubeClient))
	evaluator.indexer = indexer
	stopCh := make(chan struct{})
	defer close(stopCh)
	defer utilruntime.HandleCrash()
	go evaluator.Run(5, stopCh)
	handler := &quotaAdmission{
		Handler:   admission.NewHandler(admission.Create, admission.Update),
		evaluator: evaluator,
	}
	indexer.Add(resourceQuota)
	newPod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", "")))
	err := handler.Admit(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
	if err == nil {
		t.Errorf("Expected an error because the pod exceeded allowed quota")
	}
	err = handler.Admit(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "subresource", admission.Create, nil))
	if err != nil {
		t.Errorf("Did not expect an error because the action went to a subresource: %v", err)
	}
}
Example 14: Run
func (e *DockercfgController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	// Wait for the store to sync before starting any work in this controller.
	ready := make(chan struct{})
	go e.waitForDockerURLs(ready, stopCh)
	select {
	case <-ready:
	case <-stopCh:
		return
	}
	glog.Infof("Dockercfg secret controller initialized, starting.")
	go e.serviceAccountController.Run(stopCh)
	go e.secretController.Run(stopCh)
	for !e.serviceAccountController.HasSynced() || !e.secretController.HasSynced() {
		time.Sleep(100 * time.Millisecond)
	}
	for i := 0; i < workers; i++ {
		go wait.Until(e.worker, time.Second, stopCh)
	}
	<-stopCh
	glog.Infof("Shutting down dockercfg secret controller")
	e.queue.ShutDown()
}
Example 15: process
func (c *cacheWatcher) process(initEvents []watchCacheEvent, resourceVersion uint64) {
	defer utilruntime.HandleCrash()
	// Check how long we are processing initEvents.
	// As long as these are not processed, we are not processing
	// any incoming events, so if it takes long, we may actually
	// block all watchers for some time.
	// TODO: If it appears to be long in some cases, we may consider
	// - longer result buffers if there are a lot of initEvents
	// - try some parallelization
	const initProcessThreshold = 5 * time.Millisecond
	startTime := time.Now()
	for _, event := range initEvents {
		c.sendWatchCacheEvent(event)
	}
	processingTime := time.Since(startTime)
	if processingTime > initProcessThreshold {
		glog.V(2).Infof("processing %d initEvents took %v", len(initEvents), processingTime)
	}
	defer close(c.result)
	defer c.Stop()
	for {
		event, ok := <-c.input
		if !ok {
			return
		}
		// only send events newer than resourceVersion
		if event.ResourceVersion > resourceVersion {
			c.sendWatchCacheEvent(event)
		}
	}
}
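Note the ordering of the defers in this example: deferred calls run last-in-first-out, so utilruntime.HandleCrash(), registered first, executes last and therefore wraps both the event loop and the other deferred cleanups. A tiny illustrative sketch of that ordering (the function process and the strings are made up):

// Illustrative only: defers run LIFO, so HandleCrash recovers the panic
// after "cleanup" has already printed.
func process() {
	defer utilruntime.HandleCrash() // runs last, recovers the panic
	defer fmt.Println("cleanup")    // runs first as the panic unwinds
	panic("boom")
}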