This article collects typical usage examples of the Go function Until from k8s.io/kubernetes/pkg/util/wait. If you are wondering what the Until function does, how to call it, or what real-world usage looks like, the curated examples here should help.
The sections below show 15 code examples of the Until function, sorted by popularity by default.
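Before the examples, here is a minimal, self-contained sketch of the call shape they all share. It assumes only the signature wait.Until(f func(), period time.Duration, stopCh <-chan struct{}), which keeps invoking f with the given period until stopCh is closed; the import path matches the one used in the examples below.

package main

import (
    "fmt"
    "time"

    "k8s.io/kubernetes/pkg/util/wait"
)

func main() {
    stopCh := make(chan struct{})

    // Run the closure roughly once per second on a separate goroutine
    // until stopCh is closed.
    go wait.Until(func() {
        fmt.Println("tick at", time.Now().Format(time.RFC3339))
    }, time.Second, stopCh)

    // Let it tick a few times, then close the channel to stop the loop.
    time.Sleep(3 * time.Second)
    close(stopCh)
    time.Sleep(100 * time.Millisecond) // give the loop a moment to observe the close
}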
Example 1: Run
// Run starts the controller's workers and blocks until stopCh is closed.
func (e *TokensController) Run(workers int, stopCh <-chan struct{}) {
    defer utilruntime.HandleCrash()

    // Start controllers (to fill stores, call informers, fill work queues)
    go e.serviceAccountController.Run(stopCh)
    go e.secretController.Run(stopCh)

    // Wait for stores to fill
    for !e.serviceAccountController.HasSynced() || !e.secretController.HasSynced() {
        time.Sleep(100 * time.Millisecond)
    }

    // Spawn workers to process work queues
    for i := 0; i < workers; i++ {
        go wait.Until(e.syncServiceAccount, 0, stopCh)
        go wait.Until(e.syncSecret, 0, stopCh)
    }

    // Block until the stop channel is closed
    <-stopCh

    // Shut down the queues
    e.syncServiceAccountQueue.ShutDown()
    e.syncSecretQueue.ShutDown()
}
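The sleep loop above waits indefinitely for the two informers to sync. If a bounded wait is preferred, the same package's wait.Poll (used in Example 10 below) can express it. This is only a sketch against the same controller fields, not the upstream code:

// Inside (e *TokensController) Run: a bounded alternative to the sleep loop,
// polling every 100ms and giving up after 30 seconds.
if err := wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
    return e.serviceAccountController.HasSynced() && e.secretController.HasSynced(), nil
}); err != nil {
    // Caches never synced in time; bail out instead of running workers against empty stores.
    return
}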
Example 2: Run
// Run begins quota controller using the specified number of workers
func (c *ClusterQuotaReconcilationController) Run(workers int, stopCh <-chan struct{}) {
    defer utilruntime.HandleCrash()

    // Wait for the stores to sync before starting any work in this controller.
    ready := make(chan struct{})
    go c.waitForSyncedStores(ready, stopCh)
    select {
    case <-ready:
    case <-stopCh:
        return
    }

    // the controllers that replenish other resources to respond rapidly to state changes
    for _, replenishmentController := range c.replenishmentControllers {
        go replenishmentController.Run(stopCh)
    }

    // the workers that chug through the quota calculation backlog
    for i := 0; i < workers; i++ {
        go wait.Until(c.worker, time.Second, stopCh)
    }

    // the timer for how often we do a full recalculation across all quotas
    go wait.Until(func() { c.calculateAll() }, c.resyncPeriod, stopCh)

    <-stopCh
    glog.Infof("Shutting down ClusterQuotaReconcilationController")
    c.queue.ShutDown()
}
Example 3: Start
func (im *realImageGCManager) Start() error {
    go wait.Until(func() {
        // Until the manager is initialized, pass a zero time so the first detection
        // treats the detected time as "unknown" (in the past).
        var ts time.Time
        if im.initialized {
            ts = time.Now()
        }
        err := im.detectImages(ts)
        if err != nil {
            glog.Warningf("[imageGCManager] Failed to monitor images: %v", err)
        } else {
            im.initialized = true
        }
    }, 5*time.Minute, wait.NeverStop)

    // Start a goroutine that periodically updates the image cache.
    // TODO(random-liu): Merge this with the previous loop.
    go wait.Until(func() {
        images, err := im.runtime.ListImages()
        if err != nil {
            glog.Warningf("[imageGCManager] Failed to update image list: %v", err)
        } else {
            im.imageCache.set(images)
        }
    }, 30*time.Second, wait.NeverStop)

    return nil
}
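Both loops above pass wait.NeverStop, a package-level channel that is never closed, so they run for the life of the process. If the caller ever needed to stop them (for example in tests), the only change is to thread a real stop channel through. A sketch under that assumption, mirroring the image-list refresh loop:

// Hypothetical stoppable variant of the image-list refresh loop above.
stopCh := make(chan struct{})
go wait.Until(func() {
    images, err := im.runtime.ListImages()
    if err != nil {
        glog.Warningf("[imageGCManager] Failed to update image list: %v", err)
        return
    }
    im.imageCache.set(images)
}, 30*time.Second, stopCh)

// ... later, during shutdown or at the end of a test:
close(stopCh)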
Example 4: Run
func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) {
    glog.Infof("Garbage Collector: Initializing")
    for _, monitor := range gc.monitors {
        go monitor.controller.Run(stopCh)
    }

    wait.PollInfinite(10*time.Second, func() (bool, error) {
        for _, monitor := range gc.monitors {
            if !monitor.controller.HasSynced() {
                glog.Infof("Garbage Collector: Waiting for resource monitors to be synced...")
                return false, nil
            }
        }
        return true, nil
    })
    glog.Infof("Garbage Collector: All monitored resources synced. Proceeding to collect garbage")

    // worker
    go wait.Until(gc.propagator.processEvent, 0, stopCh)

    for i := 0; i < workers; i++ {
        go wait.Until(gc.worker, 0, stopCh)
        go wait.Until(gc.orphanFinalizer, 0, stopCh)
    }

    Register()
    <-stopCh
    glog.Infof("Garbage Collector: Shutting down")
    gc.dirtyQueue.ShutDown()
    gc.orphanQueue.ShutDown()
    gc.propagator.eventQueue.ShutDown()
}
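Note the zero period passed for the propagator and the workers: wait.Until with a period of 0 re-invokes the function as soon as it returns, so a function that handles one queue item per call becomes a tight worker loop. A generic sketch of that shape, assuming a workqueue-style queue with Get/Done (the names below are illustrative, not the GarbageCollector's actual fields):

// A hypothetical worker in the same shape: one queue item per invocation.
worker := func() {
    item, shutdown := gcQueue.Get() // gcQueue: an illustrative workqueue.Interface
    if shutdown {
        return
    }
    defer gcQueue.Done(item)
    // ... reconcile the item ...
}
// Period 0: re-run worker immediately after each item until stopCh closes.
go wait.Until(worker, 0, stopCh)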
Example 5: Run
// Run starts a background goroutine that watches for changes to services that
// have (or had) LoadBalancers=true and ensures that they have
// load balancers created and deleted appropriately.
// serviceSyncPeriod controls how often we check the cluster's services to
// ensure that the correct load balancers exist.
// nodeSyncPeriod controls how often we check the cluster's nodes to determine
// if load balancers need to be updated to point to a new set.
//
// It's an error to call Run() more than once for a given ServiceController
// object.
func (s *ServiceController) Run(workers int) {
    defer runtime.HandleCrash()
    go s.serviceController.Run(wait.NeverStop)
    for i := 0; i < workers; i++ {
        go wait.Until(s.worker, time.Second, wait.NeverStop)
    }
    nodeLW := cache.NewListWatchFromClient(s.kubeClient.Core().RESTClient(), "nodes", v1.NamespaceAll, fields.Everything())
    cache.NewReflector(nodeLW, &v1.Node{}, s.nodeLister.Store, 0).Run()
    go wait.Until(s.nodeSyncLoop, nodeSyncPeriod, wait.NeverStop)
}
Example 6: Run
// Run begins quota controller using the specified number of workers
func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
    defer utilruntime.HandleCrash()
    go rq.rqController.Run(stopCh)
    go rq.podController.Run(stopCh)
    for i := 0; i < workers; i++ {
        go wait.Until(rq.worker, time.Second, stopCh)
    }
    go wait.Until(func() { rq.enqueueAll() }, rq.resyncPeriod(), stopCh)
    <-stopCh
    glog.Infof("Shutting down ResourceQuotaController")
    rq.queue.ShutDown()
}
Example 7: Run
// Run starts an asynchronous loop that monitors the status of cluster nodes.
func (nc *NodeController) Run() {
    go func() {
        defer utilruntime.HandleCrash()

        if !cache.WaitForCacheSync(wait.NeverStop, nc.nodeInformer.Informer().HasSynced, nc.podInformer.Informer().HasSynced, nc.daemonSetInformer.Informer().HasSynced) {
            utilruntime.HandleError(errors.New("NodeController timed out while waiting for informers to sync..."))
            return
        }

        // Incorporate the results of node status pushed from kubelet to master.
        go wait.Until(func() {
            if err := nc.monitorNodeStatus(); err != nil {
                glog.Errorf("Error monitoring node status: %v", err)
            }
        }, nc.nodeMonitorPeriod, wait.NeverStop)

        // Managing eviction of nodes:
        // When we delete pods from a node and the node was not empty at the time, we
        // queue an eviction watcher. If we hit an error, retry the deletion.
        go wait.Until(func() {
            nc.evictorLock.Lock()
            defer nc.evictorLock.Unlock()
            for k := range nc.zonePodEvictor {
                nc.zonePodEvictor[k].Try(func(value TimedValue) (bool, time.Duration) {
                    obj, exists, err := nc.nodeStore.GetByKey(value.Value)
                    if err != nil {
                        glog.Warningf("Failed to get Node %v from the nodeStore: %v", value.Value, err)
                    } else if !exists {
                        glog.Warningf("Node %v no longer present in nodeStore!", value.Value)
                    } else {
                        node, _ := obj.(*v1.Node)
                        zone := utilnode.GetZoneKey(node)
                        EvictionsNumber.WithLabelValues(zone).Inc()
                    }

                    nodeUid, _ := value.UID.(string)
                    remaining, err := deletePods(nc.kubeClient, nc.recorder, value.Value, nodeUid, nc.daemonSetStore)
                    if err != nil {
                        utilruntime.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err))
                        return false, 0
                    }
                    if remaining {
                        glog.Infof("Pods awaiting deletion due to NodeController eviction")
                    }
                    return true, 0
                })
            }
        }, nodeEvictionPeriod, wait.NeverStop)
    }()
}
Example 8: Run
// It's an error to call Run() more than once for a given ServiceController
// object.
func (s *ServiceController) Run(workers int, stopCh <-chan struct{}) error {
    defer runtime.HandleCrash()
    go s.serviceController.Run(stopCh)
    go s.clusterController.Run(stopCh)
    for i := 0; i < workers; i++ {
        go wait.Until(s.fedServiceWorker, time.Second, stopCh)
    }
    go wait.Until(s.clusterEndpointWorker, time.Second, stopCh)
    go wait.Until(s.clusterServiceWorker, time.Second, stopCh)
    go wait.Until(s.clusterSyncLoop, clusterSyncPeriod, stopCh)
    <-stopCh
    glog.Infof("Shutting down Federation Service Controller")
    s.queue.ShutDown()
    return nil
}
Example 9: startKubelet
func startKubelet(k KubeletBootstrap, podCfg *config.PodConfig, kc *KubeletConfig) {
    // start the kubelet
    go wait.Until(func() { k.Run(podCfg.Updates()) }, 0, wait.NeverStop)

    // start the kubelet server
    if kc.EnableServer {
        go wait.Until(func() {
            k.ListenAndServe(kc.Address, kc.Port, kc.TLSOptions, kc.Auth, kc.EnableDebuggingHandlers)
        }, 0, wait.NeverStop)
    }
    if kc.ReadOnlyPort > 0 {
        go wait.Until(func() {
            k.ListenAndServeReadOnly(kc.Address, kc.ReadOnlyPort)
        }, 0, wait.NeverStop)
    }
}
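Here, too, the period is 0 and the channel is wait.NeverStop, so each closure is restarted immediately whenever it returns; for the server calls this behaves like a crude crash-loop supervisor around ListenAndServe. The same shape in isolation, with an illustrative blocking serve function (srv is a hypothetical *http.Server, not part of the kubelet code above):

// serveOnce stands in for any blocking call that may return on error.
serveOnce := func() {
    if err := srv.ListenAndServe(); err != nil {
        log.Printf("server exited, will restart: %v", err)
    }
}
// Period 0: restart serveOnce immediately whenever it returns; never stop.
go wait.Until(serveOnce, 0, wait.NeverStop)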
Example 10: Start
func (r *ResourceCollector) Start() {
    // Get the cgroup container names for the kubelet and docker processes.
    // Note: only the error from the second lookup is checked below.
    kubeletContainer, err := getContainerNameForProcess(kubeletProcessName, "")
    dockerContainer, err := getContainerNameForProcess(dockerProcessName, dockerPidFile)
    if err == nil {
        systemContainers = map[string]string{
            stats.SystemContainerKubelet: kubeletContainer,
            stats.SystemContainerRuntime: dockerContainer,
        }
    } else {
        framework.Failf("Failed to get docker container name in test-e2e-node resource collector.")
    }

    // Poll for a usable cAdvisor client at the local cadvisorPort.
    wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
        var err error
        r.client, err = cadvisorclient.NewClient(fmt.Sprintf("http://localhost:%d/", cadvisorPort))
        if err == nil {
            return true, nil
        }
        return false, err
    })
    Expect(r.client).NotTo(BeNil(), "cadvisor client not ready")

    r.request = &cadvisorapiv2.RequestOptions{IdType: "name", Count: 1, Recursive: false}
    r.stopCh = make(chan struct{})

    oldStatsMap := make(map[string]*cadvisorapiv2.ContainerStats)
    go wait.Until(func() { r.collectStats(oldStatsMap) }, r.pollingInterval, r.stopCh)
}
Example 11: TestGetClusterPolicy
// TestGetClusterPolicy tests that a ReadOnlyPolicyClient GetPolicy() call correctly retrieves a cluster policy
// when the namespace given is equal to the empty string
func TestGetClusterPolicy(t *testing.T) {
    testClient, policyStopChannel, bindingStopChannel, testChannel := beforeTestingSetup_readonlycache()
    defer close(policyStopChannel)
    defer close(bindingStopChannel)

    var clusterPolicy *authorizationapi.Policy
    var err error
    namespace := ""
    context := kapi.WithNamespace(kapi.NewContext(), namespace)
    name := "uniqueClusterPolicyName"

    utilwait.Until(func() {
        clusterPolicy, err = testClient.GetPolicy(context, name)
        if (err == nil) &&
            (clusterPolicy != nil) &&
            (clusterPolicy.Name == name) &&
            (clusterPolicy.Namespace == namespace) {
            close(testChannel)
        }
    }, 1*time.Millisecond, testChannel)

    switch {
    case err != nil:
        t.Errorf("Error getting cluster policy using GetPolicy(): %v", err)
    case clusterPolicy == nil:
        t.Error("Policy is nil")
    case clusterPolicy.Name != name:
        t.Errorf("Expected policy.Name to be '%s', but got '%s'", name, clusterPolicy.Name)
    case clusterPolicy.Namespace != "":
        t.Errorf("Expected policy.Namespace to be '%s', but got '%s'", namespace, clusterPolicy.Namespace)
    }
}
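This test (and Example 14 below) uses wait.Until in the opposite direction from the controllers above: the polled closure itself closes the stop channel once the expected object shows up, so the blocking (non-`go`) Until call doubles as a poll-until-found loop with no explicit timeout. Stripped of the policy details, the pattern looks roughly like this; Thing and fetch are illustrative stand-ins, not OpenShift APIs:

// done doubles as Until's stop channel and as the "found it" signal.
done := make(chan struct{})
var result *Thing
var err error
wait.Until(func() {
    result, err = fetch("uniqueName")
    if err == nil && result != nil && result.Name == "uniqueName" {
        close(done) // Until observes the closed channel and stops before the next call
    }
}, 1*time.Millisecond, done)
// At this point result and err hold the last observation and can be asserted on.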
Example 12: Start
func (cm *containerManagerImpl) Start() error {
    // Setup the node
    if err := cm.setupNode(); err != nil {
        return err
    }

    // Don't run a background thread if there are no ensureStateFuncs.
    numEnsureStateFuncs := 0
    for _, cont := range cm.systemContainers {
        if cont.ensureStateFunc != nil {
            numEnsureStateFuncs++
        }
    }
    if numEnsureStateFuncs == 0 {
        return nil
    }

    // Run ensure state functions every minute.
    go wait.Until(func() {
        for _, cont := range cm.systemContainers {
            if cont.ensureStateFunc != nil {
                if err := cont.ensureStateFunc(cont.manager); err != nil {
                    glog.Warningf("[ContainerManager] Failed to ensure state of %q: %v", cont.name, err)
                }
            }
        }
    }, time.Minute, wait.NeverStop)

    return nil
}
Example 13: Run
func (frsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) {
    go frsc.replicaSetController.Run(stopCh)
    frsc.fedReplicaSetInformer.Start()
    frsc.fedPodInformer.Start()

    frsc.replicasetDeliverer.StartWithHandler(func(item *fedutil.DelayingDelivererItem) {
        frsc.replicasetWorkQueue.Add(item.Key)
    })
    frsc.clusterDeliverer.StartWithHandler(func(_ *fedutil.DelayingDelivererItem) {
        frsc.reconcileReplicaSetsOnClusterChange()
    })

    for !frsc.isSynced() {
        time.Sleep(5 * time.Millisecond)
    }

    for i := 0; i < workers; i++ {
        go wait.Until(frsc.worker, time.Second, stopCh)
    }

    fedutil.StartBackoffGC(frsc.replicaSetBackoff, stopCh)

    <-stopCh
    glog.Infof("Shutting down ReplicaSetController")
    frsc.replicasetDeliverer.Stop()
    frsc.clusterDeliverer.Stop()
    frsc.replicasetWorkQueue.ShutDown()
    frsc.fedReplicaSetInformer.Stop()
    frsc.fedPodInformer.Stop()
}
Example 14: TestPolicyGet
// TestPolicyGet tests that a Get() call to the ReadOnlyPolicyCache will retrieve the correct policy
func TestPolicyGet(t *testing.T) {
    testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicycache()
    defer close(cacheChannel)

    var policy *authorizationapi.Policy
    var err error
    namespace := "namespaceTwo"
    name := "uniquePolicyName"

    utilwait.Until(func() {
        policy, err = testCache.Get(name, namespace)
        if (err == nil) &&
            (policy != nil) &&
            (policy.Name == name) &&
            (policy.Namespace == namespace) {
            close(testChannel)
        }
    }, 1*time.Millisecond, testChannel)

    switch {
    case err != nil:
        t.Errorf("Error getting policy using ReadOnlyPolicyCache: %v", err)
    case policy == nil:
        t.Error("Policy is nil")
    case policy.Name != name:
        t.Errorf("Expected policy name to be '%s', was '%s'", name, policy.Name)
    case policy.Namespace != namespace:
        t.Errorf("Expected policy namespace to be '%s', was '%s'", namespace, policy.Namespace)
    }
}
Example 15: RunUntil
// RunUntil starts the controller until the provided ch is closed.
func (c *Repair) RunUntil(ch chan struct{}) {
    utilwait.Until(func() {
        if err := c.RunOnce(); err != nil {
            utilruntime.HandleError(err)
        }
    }, c.interval, ch)
}
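Unlike the controller examples, RunUntil calls utilwait.Until directly rather than via `go`, so RunUntil itself blocks until ch is closed; callers that want it in the background must spawn the goroutine themselves. A sketch of a typical call site, with an illustrative repair value:

stop := make(chan struct{})
// RunUntil blocks, so run it on its own goroutine and close(stop) to end it.
go repairController.RunUntil(stop)

// ... later, during shutdown:
close(stop)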