This page collects typical usage examples of the Golang function k8s.io/kubernetes/pkg/util.Until. If you are wondering what Until does, how to call it, or what real-world usage looks like, the curated examples below may help.
A total of 15 code examples of the Until function are shown, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang examples.
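Before the examples, here is a minimal, self-contained sketch of the calling pattern they all share. It assumes only what is visible in the examples themselves: util.Until takes a no-argument function, a period, and a stop channel, runs the function repeatedly (waiting roughly one period between runs) until the channel is closed, and util.NeverStop can be passed when the loop should never terminate. This is an illustration of the usage pattern, not the upstream implementation:

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util"
)

func main() {
	stopCh := make(chan struct{})

	// Run the closure repeatedly, roughly once per second, until stopCh is closed.
	go util.Until(func() {
		fmt.Println("periodic work")
	}, 1*time.Second, stopCh)

	time.Sleep(3 * time.Second)
	close(stopCh) // stops the loop started above

	// For loops that should run for the life of the process, the examples below
	// pass util.NeverStop instead of a locally created channel.
}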
Example 1: doWork
func doWork() ([]source_api.Source, sinks.ExternalSinkManager, manager.Manager, error) {
	c := cache.NewCache(*argCacheDuration, time.Minute)
	sources, err := newSources(c)
	if err != nil {
		return nil, nil, nil, err
	}
	sinkManager, err := sinks.NewExternalSinkManager(nil, c, *argSinkFrequency)
	if err != nil {
		return nil, nil, nil, err
	}
	manager, err := manager.NewManager(sources, sinkManager, *argStatsResolution, *argCacheDuration, c, *argUseModel, *argModelResolution)
	if err != nil {
		return nil, nil, nil, err
	}
	if err := manager.SetSinkUris(argSinks); err != nil {
		return nil, nil, nil, err
	}
	// Spawn the Model Housekeeping goroutine even if the model is not enabled.
	// This allows the model to be activated/deactivated at runtime.
	modelDuration := 2 * *argModelResolution
	if (*argCacheDuration).Nanoseconds() < modelDuration.Nanoseconds() {
		modelDuration = *argCacheDuration
	}
	go util.Until(manager.HousekeepModel, modelDuration, util.NeverStop)
	go util.Until(manager.Housekeep, *argPollDuration, util.NeverStop)
	return sources, sinkManager, manager, nil
}
Example 2: doWork
func doWork() ([]source_api.Source, sinks.ExternalSinkManager, manager.Manager, error) {
	c := cache.NewCache(*argCacheDuration, time.Minute)
	sources, err := newSources(c)
	if err != nil {
		return nil, nil, nil, err
	}
	sinkManager, err := sinks.NewExternalSinkManager(nil, c, *argSinkFrequency)
	if err != nil {
		return nil, nil, nil, err
	}
	manager, err := manager.NewManager(sources, sinkManager, *argStatsResolution, *argCacheDuration, c, *argUseModel, *argModelResolution)
	if err != nil {
		return nil, nil, nil, err
	}
	if err := manager.SetSinkUris(argSinks); err != nil {
		return nil, nil, nil, err
	}
	// Spawn the Model Housekeeping goroutine even if the model is not enabled.
	// This allows the model to be activated/deactivated at runtime.
	// Set the housekeeping period to 2 * argModelResolution + 25 sec.
	// TODO(afein): select a more well-defined housekeeping interval
	modelDuration := 2 * *argModelResolution
	modelDuration = time.Time{}.Add(modelDuration).Add(25 * time.Second).Sub(time.Time{})
	if (*argCacheDuration).Nanoseconds() < modelDuration.Nanoseconds() {
		modelDuration = *argCacheDuration
	}
	go util.Until(manager.HousekeepModel, modelDuration, util.NeverStop)
	go util.Until(manager.Housekeep, *argPollDuration, util.NeverStop)
	return sources, sinkManager, manager, nil
}
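One detail worth calling out in Example 2: the second assignment to modelDuration adds 25 seconds by routing the duration through a zero time.Time value and subtracting it back out. Because time.Duration values can be added directly, the line is equivalent to the simpler form below (shown only as an illustrative rewrite, assuming argModelResolution is a *time.Duration flag, as its usage suggests):

// Equivalent duration arithmetic, assuming argModelResolution is a *time.Duration flag.
modelDuration := 2*(*argModelResolution) + 25*time.Second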
Example 3: Run
// Run starts an asynchronous loop that monitors the status of cluster nodes.
func (nc *NodeController) Run(period time.Duration) {
	// Incorporate the results of node status pushed from kubelet to master.
	go util.Until(func() {
		if err := nc.monitorNodeStatus(); err != nil {
			glog.Errorf("Error monitoring node status: %v", err)
		}
	}, nc.nodeMonitorPeriod, util.NeverStop)

	// Managing eviction of nodes:
	// 1. when we delete pods off a node, if the node was not empty at the time we then
	//    queue a termination watcher
	//    a. If we hit an error, retry deletion
	// 2. The terminator loop ensures that pods are eventually cleaned and we never
	//    terminate a pod in a time period less than nc.maximumGracePeriod. AddedAt
	//    is the time from which we measure "has this pod been terminating too long",
	//    after which we will delete the pod with grace period 0 (force delete).
	//    a. If we hit errors, retry instantly
	//    b. If there are no pods left terminating, exit
	//    c. If there are pods still terminating, wait for their estimated completion
	//       before retrying
	go util.Until(func() {
		nc.podEvictor.Try(func(value TimedValue) (bool, time.Duration) {
			remaining, err := nc.deletePods(value.Value)
			if err != nil {
				util.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err))
				return false, 0
			}
			if remaining {
				nc.terminationEvictor.Add(value.Value)
			}
			return true, 0
		})
	}, nodeEvictionPeriod, util.NeverStop)

	// TODO: replace with a controller that ensures pods that are terminating complete
	// in a particular time period
	go util.Until(func() {
		nc.terminationEvictor.Try(func(value TimedValue) (bool, time.Duration) {
			completed, remaining, err := nc.terminatePods(value.Value, value.AddedAt)
			if err != nil {
				util.HandleError(fmt.Errorf("unable to terminate pods on node %q: %v", value.Value, err))
				return false, 0
			}
			if completed {
				glog.Infof("All pods terminated on %s", value.Value)
				nc.recordNodeEvent(value.Value, "TerminatedAllPods", fmt.Sprintf("Terminated all Pods on Node %s.", value.Value))
				return true, 0
			}
			glog.V(2).Infof("Pods terminating since %s on %q, estimated completion %s", value.AddedAt, value.Value, remaining)
			// clamp very short intervals
			if remaining < nodeEvictionPeriod {
				remaining = nodeEvictionPeriod
			}
			return false, remaining
		})
	}, nodeEvictionPeriod, util.NeverStop)
}
Example 4: Run
// Run begins quota controller using the specified number of workers
func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	go rq.rqController.Run(stopCh)
	go rq.podController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go util.Until(rq.worker, time.Second, stopCh)
	}
	go util.Until(func() { rq.enqueueAll() }, rq.resyncPeriod(), stopCh)
	<-stopCh
	glog.Infof("Shutting down ResourceQuotaController")
	rq.queue.ShutDown()
}
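Example 4 shows the other common way a stop channel is used with util.Until: several loops share one channel, and closing it shuts them all down together. A minimal, self-contained sketch of that fan-out pattern, using placeholder worker functions rather than the ResourceQuotaController internals, could look like this:

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util"
)

func main() {
	stopCh := make(chan struct{})

	// Several util.Until loops driven by the same stop channel, as in
	// ResourceQuotaController.Run above.
	for i := 0; i < 3; i++ {
		i := i // capture the loop variable for the closure
		go util.Until(func() { fmt.Printf("worker %d tick\n", i) }, time.Second, stopCh)
	}
	go util.Until(func() { fmt.Println("resync") }, 5*time.Second, stopCh)

	// Closing the channel stops every loop that was started with it.
	time.Sleep(2 * time.Second)
	close(stopCh)
}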
Example 5: NewCacher
// Create a new Cacher responsible for serving WATCH and LIST requests from its
// internal cache and updating its cache in the background based on the given
// configuration.
func NewCacher(config CacherConfig) *Cacher {
	watchCache := cache.NewWatchCache(config.CacheCapacity)
	listerWatcher := newCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)
	cacher := &Cacher{
		initialized: sync.WaitGroup{},
		watchCache:  watchCache,
		reflector:   cache.NewReflector(listerWatcher, config.Type, watchCache, 0),
		watcherIdx:  0,
		watchers:    make(map[int]*cacheWatcher),
		versioner:   config.Versioner,
		keyFunc:     config.KeyFunc,
	}
	cacher.initialized.Add(1)
	// See the startCaching method for an explanation of this callback.
	watchCache.SetOnReplace(func() {
		cacher.initOnce.Do(func() { cacher.initialized.Done() })
		cacher.Unlock()
	})
	watchCache.SetOnEvent(cacher.processEvent)
	stopCh := config.StopChannel
	// A period of 0 means startCaching is restarted immediately whenever it returns.
	go util.Until(func() { cacher.startCaching(stopCh) }, 0, stopCh)
	cacher.initialized.Wait()
	return cacher
}
Example 6: Run
// Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set).
func (s *ProxyServer) Run(_ []string) error {
	// remove iptables rules and exit
	if s.Config.CleanupAndExit {
		encounteredError := userspace.CleanupLeftovers(s.IptInterface)
		encounteredError = iptables.CleanupLeftovers(s.IptInterface) || encounteredError
		if encounteredError {
			return errors.New("Encountered an error while tearing down rules.")
		}
		return nil
	}
	s.Broadcaster.StartRecordingToSink(s.Client.Events(""))
	// Birth Cry after the birth is successful
	s.birthCry()
	// Start up Healthz service if requested
	if s.Config.HealthzPort > 0 {
		go util.Until(func() {
			err := http.ListenAndServe(s.Config.HealthzBindAddress.String()+":"+strconv.Itoa(s.Config.HealthzPort), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second, util.NeverStop)
	}
	// Just loop forever for now...
	s.Proxier.SyncLoop()
	return nil
}
Example 7: Run
func (d *DeploymentController) Run(syncPeriod time.Duration) {
	go util.Until(func() {
		if err := d.reconcileDeployments(); err != nil {
			glog.Errorf("Couldn't reconcile deployments: %v", err)
		}
	}, syncPeriod, util.NeverStop)
}
Example 8: RunKubernetesService
// RunKubernetesService periodically updates the kubernetes service
func (c *Controller) RunKubernetesService(ch chan struct{}) {
	util.Until(func() {
		if err := c.UpdateKubernetesService(); err != nil {
			util.HandleError(fmt.Errorf("unable to sync kubernetes service: %v", err))
		}
	}, c.EndpointInterval, ch)
}
Example 9: RunUntil
// RunUntil starts the controller until the provided ch is closed.
func (c *Repair) RunUntil(ch chan struct{}) {
	util.Until(func() {
		if err := c.RunOnce(); err != nil {
			util.HandleError(err)
		}
	}, c.interval, ch)
}
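Unlike most of the examples above, Examples 8 and 9 invoke util.Until without the go keyword, so RunKubernetesService and RunUntil block until the channel passed to them is closed. A sketch of the caller side, with repairController standing in as a hypothetical *Repair instance created elsewhere:

stopCh := make(chan struct{})
go repairController.RunUntil(stopCh) // RunUntil blocks internally, so start it on its own goroutine
// ... later, during shutdown:
close(stopCh) // RunUntil (and the util.Until loop inside it) returns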
Example 10: TestClusterPolicyBindingListRespectingFields
// TestClusterPolicyBindingListRespectingFields tests that a List() call filtered with a field selector
// against the ReadOnlyClusterPolicyBindingCache returns all clusterPolicyBindings matching that field.
func TestClusterPolicyBindingListRespectingFields(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlyclusterpolicybindingcache()
	defer close(cacheChannel)

	var clusterPolicyBindings *authorizationapi.ClusterPolicyBindingList
	var err error

	name := "uniqueClusterPolicyBindingName"
	field := fields.OneTermEqualSelector("metadata.name", name)

	util.Until(func() {
		clusterPolicyBindings, err = testCache.List(&unversioned.ListOptions{FieldSelector: unversioned.FieldSelector{Selector: field}})
		if (err == nil) &&
			(clusterPolicyBindings != nil) &&
			(len(clusterPolicyBindings.Items) == 1) &&
			(clusterPolicyBindings.Items[0].Name == name) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting clusterPolicyBinding with fieldSelector using ReadOnlyClusterBindingCache: %v", err)
	case clusterPolicyBindings == nil:
		t.Error("ClusterPolicyBindingList using fieldSelector is nil")
	case len(clusterPolicyBindings.Items) != 1:
		t.Errorf("Expected clusterPolicyBindingList using fieldSelector to contain 1 item, had %d", len(clusterPolicyBindings.Items))
	case clusterPolicyBindings.Items[0].Name != name:
		t.Errorf("Expected clusterPolicyBinding to have name '%s', had '%s'", name, clusterPolicyBindings.Items[0].Name)
	}
}
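Examples 10 through 15 all use the same polling trick: util.Until is driven with a very short period, and the test channel doubles as both the stop channel and the success signal, because closing it is what terminates the loop. The stripped-down sketch below illustrates the pattern; Widget and fetchWidget are hypothetical stand-ins for the cache types and List/Get calls being polled in the real tests, and the surrounding package name is invented for the sketch.

package example

import (
	"testing"
	"time"

	"k8s.io/kubernetes/pkg/util"
)

// Widget and fetchWidget are hypothetical placeholders for the code under test.
type Widget struct{ Name string }

func fetchWidget() (*Widget, error) { return &Widget{Name: "example"}, nil }

func TestEventuallyCondition(t *testing.T) {
	testChannel := make(chan struct{})
	var result *Widget
	var err error

	// Poll every millisecond; closing testChannel is what makes util.Until return,
	// so the loop only ends once the expected state has been observed.
	util.Until(func() {
		result, err = fetchWidget()
		if err == nil && result != nil && result.Name == "example" {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	// Mirrors the defensive switch statements used in the examples above.
	switch {
	case err != nil:
		t.Errorf("unexpected error: %v", err)
	case result == nil:
		t.Error("expected a non-nil result")
	}
}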
Example 11: TestPolicyList
// TestPolicyList tests that a List() call for a namespace to the ReadOnlyPolicyCache will return all policies in that namespace
func TestPolicyList(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicycache()
	defer close(cacheChannel)

	var policies *authorizationapi.PolicyList
	var err error

	namespace := "namespaceTwo"

	util.Until(func() {
		policies, err = testCache.List(nil, namespace)
		if (err == nil) &&
			(policies != nil) &&
			(len(policies.Items) == 2) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policies using ReadOnlyPolicyCache: %v", err)
	case policies == nil:
		t.Error("PoliciesList is nil")
	case len(policies.Items) != 2:
		t.Errorf("Expected policyList to have 2 policies, had %d", len(policies.Items))
	}
}
Example 12: TestPolicyGet
// TestPolicyGet tests that a Get() call to the ReadOnlyPolicyCache will retrieve the correct policy
func TestPolicyGet(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicycache()
	defer close(cacheChannel)

	var policy *authorizationapi.Policy
	var err error

	namespace := "namespaceTwo"
	name := "uniquePolicyName"

	util.Until(func() {
		policy, err = testCache.Get(name, namespace)
		if (err == nil) &&
			(policy != nil) &&
			(policy.Name == name) &&
			(policy.Namespace == namespace) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policy using ReadOnlyPolicyCache: %v", err)
	case policy == nil:
		t.Error("Policy is nil")
	case policy.Name != name:
		t.Errorf("Expected policy name to be '%s', was '%s'", name, policy.Name)
	case policy.Namespace != namespace:
		t.Errorf("Expected policy namespace to be '%s', was '%s'", namespace, policy.Namespace)
	}
}
Example 13: TestPolicyListRespectingFields
// TestPolicyListRespectingFields tests that a List() call for some namespace, filtered with a field selector,
// against the ReadOnlyPolicyCache returns all policies in that namespace matching that field.
func TestPolicyListRespectingFields(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicycache()
	defer close(cacheChannel)

	var policies *authorizationapi.PolicyList
	var err error

	name := "uniquePolicyName"
	namespace := "namespaceTwo"
	field := fields.OneTermEqualSelector("metadata.name", name)

	util.Until(func() {
		policies, err = testCache.List(&kapi.ListOptions{FieldSelector: field}, namespace)
		if (err == nil) &&
			(policies != nil) &&
			(len(policies.Items) == 1) &&
			(policies.Items[0].Name == name) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policies using ReadOnlyPolicyCache: %v", err)
	case policies == nil:
		t.Error("PoliciesList is nil")
	case len(policies.Items) != 1:
		t.Errorf("Expected policyList to have 1 policy, had %d", len(policies.Items))
	case policies.Items[0].Name != name:
		t.Errorf("Expected policy name to be '%s', was '%s'", name, policies.Items[0].Name)
	}
}
Example 14: TestListClusterPolicyBindings
// TestListClusterPolicyBindings tests that a ReadOnlyPolicyClient ListPolicyBindings() call correctly lists cluster policy bindings
// when the namespace given is the empty string
func TestListClusterPolicyBindings(t *testing.T) {
	testClient, policyStopChannel, bindingStopChannel, testChannel := beforeTestingSetup_readonlycache()
	defer close(policyStopChannel)
	defer close(bindingStopChannel)

	var clusterPolicyBindings *authorizationapi.PolicyBindingList
	var err error

	namespace := ""
	context := kapi.WithNamespace(kapi.NewContext(), namespace)

	util.Until(func() {
		clusterPolicyBindings, err = testClient.ListPolicyBindings(context, nil)
		if (err == nil) &&
			(clusterPolicyBindings != nil) &&
			(len(clusterPolicyBindings.Items) == 2) &&
			(strings.Contains(clusterPolicyBindings.Items[0].Name, "Cluster")) &&
			(strings.Contains(clusterPolicyBindings.Items[1].Name, "Cluster")) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting cluster policy binding using ListPolicyBindings(): %v", err)
	case clusterPolicyBindings == nil:
		t.Error("ClusterPolicyBindingsList is nil")
	case len(clusterPolicyBindings.Items) != 2:
		t.Errorf("ClusterPolicyBindingsList contains %d items, should contain 2.", len(clusterPolicyBindings.Items))
	case !strings.Contains(clusterPolicyBindings.Items[0].Name, "Cluster") || !strings.Contains(clusterPolicyBindings.Items[1].Name, "Cluster"):
		t.Error("ClusterPolicyBinding name should contain \"Cluster\", but did not.")
	}
}
Example 15: TestPolicyListNamespaceAll
// TestPolicyListNamespaceAll tests that a List() call for kapi.NamespaceAll to the ReadOnlyPolicyCache will return
// all policies in all namespaces
func TestPolicyListNamespaceAll(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicycache()
	defer close(cacheChannel)

	var policies *authorizationapi.PolicyList
	var err error

	namespace := kapi.NamespaceAll
	label := labels.Everything()
	field := fields.Everything()

	util.Until(func() {
		policies, err = testCache.List(label, field, namespace)
		if (err == nil) &&
			(policies != nil) &&
			(len(policies.Items) == 3) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policies using ReadOnlyPolicyCache: %v", err)
	case policies == nil:
		t.Error("PoliciesList is nil")
	case len(policies.Items) != 3:
		t.Errorf("Expected policyList to have 3 policies, had %d", len(policies.Items))
	}
}