This article collects typical usage examples of the Golang Stop function from github.com/juju/juju/state/watcher. If you are wondering what watcher.Stop does, how to call it, or what real-world uses of it look like, the curated code samples below should help.
The following 15 code examples show the Stop function in use, sorted by popularity by default.
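Most of the examples below follow the same pattern: a worker defers watcher.Stop(w, &tomb) so that, when the worker exits, the watcher is shut down and any error from stopping it is pushed into the worker's tomb rather than silently dropped. The following is a minimal, hypothetical sketch of that pattern in isolation; the worker and watcher types are made up for illustration, and the exact tomb import path (gopkg.in/tomb.v1 here, launchpad.net/tomb in older juju revisions) and the error helper (watcher.EnsureErr here, watcher.MustErr in older code) depend on the juju version you are reading.

package example

import (
	"github.com/juju/juju/state/watcher"
	"gopkg.in/tomb.v1"
)

// changeWatcher is a hypothetical stand-in for any juju watcher: it exposes
// Changes() for notifications, Stop() so watcher.Stop can shut it down, and
// Err() so watcher.EnsureErr can report why its Changes channel closed.
type changeWatcher interface {
	Changes() <-chan struct{}
	Stop() error
	Err() error
}

type exampleWorker struct {
	tomb tomb.Tomb
	w    changeWatcher
}

func (e *exampleWorker) loop() error {
	// watcher.Stop stops the watcher and, if stopping fails, kills the tomb
	// with that error, so a single deferred call is enough to guarantee the
	// watcher is cleaned up and its failure is not lost.
	defer watcher.Stop(e.w, &e.tomb)
	for {
		select {
		case <-e.tomb.Dying():
			return tomb.ErrDying
		case _, ok := <-e.w.Changes():
			if !ok {
				// The Changes channel closed unexpectedly: surface the
				// watcher's own error.
				return watcher.EnsureErr(e.w)
			}
			// ... react to the change here ...
		}
	}
}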
Example 1: TestStop
func (s *FastPeriodSuite) TestStop(c *gc.C) {
	t := &tomb.Tomb{}
	// Stopping a watcher whose Stop returns nil leaves the tomb untouched.
	watcher.Stop(&dummyWatcher{nil}, t)
	c.Assert(t.Err(), gc.Equals, tomb.ErrStillAlive)
	// Stopping a watcher whose Stop fails kills the tomb with that error.
	watcher.Stop(&dummyWatcher{errors.New("BLAM")}, t)
	c.Assert(t.Err(), gc.ErrorMatches, "BLAM")
}
Example 2: stopWatchers
// stopWatchers stops all the firewaller's watchers.
func (fw *Firewaller) stopWatchers() {
	watcher.Stop(fw.environWatcher, &fw.tomb)
	watcher.Stop(fw.machinesWatcher, &fw.tomb)
	for _, unitd := range fw.unitds {
		watcher.Stop(unitd, &fw.tomb)
	}
	for _, serviced := range fw.serviceds {
		watcher.Stop(serviced, &fw.tomb)
	}
	for _, machined := range fw.machineds {
		watcher.Stop(machined, &fw.tomb)
	}
}
Example 3: ModeTerminating
// ModeTerminating marks the unit dead and returns ErrTerminateAgent.
func ModeTerminating(u *Uniter) (next Mode, err error) {
	defer modeContext("ModeTerminating", &err)()
	if err = u.unit.SetStatus(params.StatusStopped, "", nil); err != nil {
		return nil, err
	}
	w, err := u.unit.Watch()
	if err != nil {
		return nil, err
	}
	defer watcher.Stop(w, &u.tomb)
	for {
		select {
		case <-u.tomb.Dying():
			return nil, tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return nil, watcher.MustErr(w)
			}
			if err := u.unit.Refresh(); err != nil {
				return nil, err
			}
			if hasSubs, err := u.unit.HasSubordinates(); err != nil {
				return nil, err
			} else if hasSubs {
				continue
			}
			// The unit is known to be Dying; so if it didn't have subordinates
			// just above, it can't acquire new ones before this call.
			if err := u.unit.EnsureDead(); err != nil {
				return nil, err
			}
			return nil, worker.ErrTerminateAgent
		}
	}
}
Example 4: loop
// loop is the worker's main loop.
func (nw *Networker) loop() error {
	logger.Debugf("starting on machine %q", nw.tag)
	if !nw.CanWriteConfig() {
		logger.Warningf("running in safe mode - no commands or changes to network config will be done")
	}
	w, err := nw.init()
	if err != nil {
		if w != nil {
			// We don't bother to propagate an error, because we
			// already have an error.
			w.Stop()
		}
		return err
	}
	defer watcher.Stop(w, &nw.tomb)
	logger.Debugf("initialized and started watching")
	for {
		select {
		case <-nw.tomb.Dying():
			logger.Debugf("shutting down")
			return tomb.ErrDying
		case _, ok := <-w.Changes():
			logger.Debugf("got change notification")
			if !ok {
				return watcher.MustErr(w)
			}
			if err := nw.handle(); err != nil {
				return err
			}
		}
	}
}
Example 5: loop
func (nw *notifyWorker) loop() error {
	w, err := nw.handler.SetUp()
	if err != nil {
		if w != nil {
			// We don't bother to propagate an error, because we
			// already have an error.
			w.Stop()
		}
		return err
	}
	defer propagateTearDown(nw.handler, &nw.tomb)
	defer watcher.Stop(w, &nw.tomb)
	for {
		select {
		case <-nw.tomb.Dying():
			return tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return ensureErr(w)
			}
			if err := nw.handler.Handle(nw.tomb.Dying()); err != nil {
				return err
			}
		}
	}
}
Example 6: newStorageSource
// newStorageSource creates a hook source that watches for changes to,
// and generates storage hooks for, a single storage attachment.
func newStorageSource(
	st StorageAccessor,
	unitTag names.UnitTag,
	storageTag names.StorageTag,
	attached bool,
) (*storageSource, error) {
	w, err := st.WatchStorageAttachment(storageTag, unitTag)
	if err != nil {
		return nil, errors.Annotate(err, "watching storage attachment")
	}
	s := &storageSource{
		storageHookQueue: &storageHookQueue{
			unitTag:    unitTag,
			storageTag: storageTag,
			attached:   attached,
		},
		st:      st,
		watcher: w,
		changes: make(chan hook.SourceChange),
	}
	go func() {
		defer s.tomb.Done()
		defer watcher.Stop(w, &s.tomb)
		s.tomb.Kill(s.loop())
	}()
	return s, nil
}
Example 7: NewEnvironObserver
// NewEnvironObserver waits for the environment to have a valid
// environment configuration and returns a new environment observer.
// While waiting for the first environment configuration, it will
// return with tomb.ErrDying if it receives a value on dying.
func NewEnvironObserver(st EnvironConfigObserver) (*EnvironObserver, error) {
	config, err := st.EnvironConfig()
	if err != nil {
		return nil, err
	}
	environ, err := environs.New(config)
	if err != nil {
		return nil, errors.Annotate(err, "cannot create an environment")
	}
	environWatcher, err := st.WatchForEnvironConfigChanges()
	if err != nil {
		return nil, errors.Annotate(err, "cannot watch environment config")
	}
	obs := &EnvironObserver{
		st:             st,
		environ:        environ,
		environWatcher: environWatcher,
	}
	go func() {
		defer obs.tomb.Done()
		defer watcher.Stop(environWatcher, &obs.tomb)
		obs.tomb.Kill(obs.loop())
	}()
	return obs, nil
}
Example 8: addRelation
// addRelation causes the unit agent to join the supplied relation, and to
// store persistent state in the supplied dir.
func (u *Uniter) addRelation(rel *uniter.Relation, dir *relation.StateDir) error {
	logger.Infof("joining relation %q", rel)
	ru, err := rel.Unit(u.unit)
	if err != nil {
		return err
	}
	r := NewRelationer(ru, dir, u.relationHooks)
	w, err := u.unit.Watch()
	if err != nil {
		return err
	}
	defer watcher.Stop(w, &u.tomb)
	for {
		select {
		case <-u.tomb.Dying():
			return tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return watcher.MustErr(w)
			}
			err := r.Join()
			if params.IsCodeCannotEnterScopeYet(err) {
				logger.Infof("cannot enter scope for relation %q; waiting for subordinate to be removed", rel)
				continue
			} else if err != nil {
				return err
			}
			logger.Infof("joined relation %q", rel)
			u.relationers[rel.Id()] = r
			return nil
		}
	}
}
Example 9: loop
func (sw *stringsWorker) loop() error {
	w, err := sw.handler.SetUp()
	if err != nil {
		if w != nil {
			// We don't bother to propagate an error, because we
			// already have an error.
			w.Stop()
		}
		return err
	}
	defer propagateTearDown(sw.handler, &sw.tomb)
	defer watcher.Stop(w, &sw.tomb)
	for {
		select {
		case <-sw.tomb.Dying():
			return tomb.ErrDying
		case changes, ok := <-w.Changes():
			if !ok {
				return mustErr(w)
			}
			if err := sw.handler.Handle(changes); err != nil {
				return err
			}
		}
	}
}
Example 10: terminate
func (u *Uniter) terminate() error {
	w, err := u.unit.Watch()
	if err != nil {
		return errors.Trace(err)
	}
	defer watcher.Stop(w, &u.tomb)
	for {
		select {
		case <-u.tomb.Dying():
			return tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return watcher.EnsureErr(w)
			}
			if err := u.unit.Refresh(); err != nil {
				return errors.Trace(err)
			}
			if hasSubs, err := u.unit.HasSubordinates(); err != nil {
				return errors.Trace(err)
			} else if hasSubs {
				continue
			}
			// The unit is known to be Dying; so if it didn't have subordinates
			// just above, it can't acquire new ones before this call.
			if err := u.unit.EnsureDead(); err != nil {
				return errors.Trace(err)
			}
			return worker.ErrTerminateAgent
		}
	}
}
Example 11: finish
func (w *relationUnitsWatcher) finish() {
	watcher.Stop(w.sw, &w.tomb)
	for _, watchedValue := range w.watching.Values() {
		w.st.watcher.Unwatch(w.st.settings.Name, watchedValue, w.updates)
	}
	close(w.updates)
	close(w.out)
	w.tomb.Done()
}
Example 12: loop
func (task *provisionerTask) loop() error {
	logger.Infof("Starting up provisioner task %s", task.machineTag)
	defer watcher.Stop(task.machineWatcher, &task.tomb)
	// Don't allow the harvesting mode to change until we have read at
	// least one set of changes, which will populate the task.machines
	// map. Otherwise we will potentially see all legitimate instances
	// as unknown.
	var harvestModeChan chan config.HarvestMode
	// Not all provisioners have a retry channel.
	var retryChan <-chan struct{}
	if task.retryWatcher != nil {
		retryChan = task.retryWatcher.Changes()
	}
	// When the watcher is started, it will have the initial changes be all
	// the machines that are relevant. Also, since this is available straight
	// away, we know there will be some changes right off the bat.
	for {
		select {
		case <-task.tomb.Dying():
			logger.Infof("Shutting down provisioner task %s", task.machineTag)
			return tomb.ErrDying
		case ids, ok := <-task.machineWatcher.Changes():
			if !ok {
				return watcher.EnsureErr(task.machineWatcher)
			}
			if err := task.processMachines(ids); err != nil {
				return errors.Annotate(err, "failed to process updated machines")
			}
			// We've seen a set of changes. Enable modification of
			// harvesting mode.
			harvestModeChan = task.harvestModeChan
		case harvestMode := <-harvestModeChan:
			if harvestMode == task.harvestMode {
				break
			}
			logger.Infof("harvesting mode changed to %s", harvestMode)
			task.harvestMode = harvestMode
			if harvestMode.HarvestUnknown() {
				logger.Infof("harvesting unknown machines")
				if err := task.processMachines(nil); err != nil {
					return errors.Annotate(err, "failed to process machines after safe mode disabled")
				}
			}
		case <-retryChan:
			if err := task.processMachinesWithTransientErrors(); err != nil {
				return errors.Annotate(err, "failed to process machines with transient errors")
			}
		}
	}
}
Example 13: loop
func (p *containerProvisioner) loop() error {
	var environConfigChanges <-chan struct{}
	environWatcher, err := p.st.WatchForEnvironConfigChanges()
	if err != nil {
		return err
	}
	environConfigChanges = environWatcher.Changes()
	defer watcher.Stop(environWatcher, &p.tomb)
	config, err := p.st.EnvironConfig()
	if err != nil {
		return err
	}
	harvestMode := config.ProvisionerHarvestMode()
	task, err := p.getStartTask(harvestMode)
	if err != nil {
		return err
	}
	defer watcher.Stop(task, &p.tomb)
	for {
		select {
		case <-p.tomb.Dying():
			return tomb.ErrDying
		case <-task.Dying():
			err := task.Err()
			logger.Errorf("%s provisioner died: %v", p.containerType, err)
			return err
		case _, ok := <-environConfigChanges:
			if !ok {
				return watcher.EnsureErr(environWatcher)
			}
			environConfig, err := p.st.EnvironConfig()
			if err != nil {
				logger.Errorf("cannot load environment configuration: %v", err)
				return err
			}
			p.configObserver.notify(environConfig)
			task.SetHarvestMode(environConfig.ProvisionerHarvestMode())
		}
	}
}
Example 14: loop
func (q *AliveHookQueue) loop(initial *State) {
	defer q.tomb.Done()
	defer watcher.Stop(q.w, &q.tomb)
	// Consume initial event, and reconcile with initial state, by inserting
	// a new RelationUnitsChange before the initial event, which schedules
	// every missing unit for immediate departure before anything else happens
	// (apart from a single potential required post-joined changed event).
	ch1, ok := <-q.w.Changes()
	if !ok {
		q.tomb.Kill(watcher.MustErr(q.w))
		return
	}
	if len(ch1.Departed) != 0 {
		panic("AliveHookQueue must be started with a fresh RelationUnitsWatcher")
	}
	q.changedPending = initial.ChangedPending
	ch0 := params.RelationUnitsChange{}
	for unit, version := range initial.Members {
		q.info[unit] = &unitInfo{
			unit:    unit,
			version: version,
			joined:  true,
		}
		if _, found := ch1.Changed[unit]; !found {
			ch0.Departed = append(ch0.Departed, unit)
		}
	}
	q.update(ch0)
	q.update(ch1)
	var next hook.Info
	var out chan<- hook.Info
	for {
		if q.empty() {
			out = nil
		} else {
			out = q.out
			next = q.next()
		}
		select {
		case <-q.tomb.Dying():
			return
		case ch, ok := <-q.w.Changes():
			if !ok {
				q.tomb.Kill(watcher.MustErr(q.w))
				return
			}
			q.update(ch)
		case out <- next:
			q.pop()
		}
	}
}
Example 15: NewSender
// NewSender starts sending hooks from source onto the out channel, and will
// continue to do so until Stop()ped (or the source is exhausted). NewSender
// takes ownership of the supplied source, and responsibility for cleaning it up;
// but it will not close the out channel.
func NewSender(out chan<- Info, source Source) Sender {
	sender := &hookSender{
		out: out,
	}
	go func() {
		defer sender.tomb.Done()
		defer watcher.Stop(source, &sender.tomb)
		sender.tomb.Kill(sender.loop(source))
	}()
	return sender
}