This article collects typical usage examples of the Kill function from the Golang package github.com/juju/juju/worker: what Kill does, how it is called, and what real-world uses look like.
The 15 code examples below are drawn from open-source projects and ordered by popularity; hopefully they help illustrate how Kill is used in practice.
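Before the examples, here is a minimal sketch of the contract they all exercise. The Worker interface below mirrors juju's worker.Worker (Kill asks a worker to stop and returns immediately; Wait blocks until it has stopped and reports its exit error); the runLoop type and main function are purely illustrative and are not part of juju.

package main

import (
	"fmt"
	"sync"
)

// Worker mirrors the juju worker.Worker contract used throughout the examples below.
type Worker interface {
	Kill()       // ask the worker to stop; returns immediately
	Wait() error // block until the worker has stopped; report its exit error
}

// runLoop is a toy Worker: a goroutine that runs until Kill is called.
type runLoop struct {
	quit chan struct{}
	done chan error
	once sync.Once
}

func newRunLoop() *runLoop {
	w := &runLoop{quit: make(chan struct{}), done: make(chan error, 1)}
	go func() {
		<-w.quit      // a real worker would also select over its work here
		w.done <- nil // nil: stopped cleanly
	}()
	return w
}

// Kill is safe to call any number of times.
func (w *runLoop) Kill() { w.once.Do(func() { close(w.quit) }) }

// Wait blocks until the loop goroutine has exited. (A real worker would
// cache the result so Wait can be called more than once.)
func (w *runLoop) Wait() error { return <-w.done }

func main() {
	var w Worker = newRunLoop()
	w.Kill()
	if err := w.Wait(); err != nil {
		fmt.Println("worker failed:", err)
	}
}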
Example 1: TestMachinerStorageAttached
func (s *MachinerSuite) TestMachinerStorageAttached(c *gc.C) {
// Machine is dying. We'll respond to "EnsureDead" by
// saying that there are still storage attachments;
// this should not cause an error.
s.accessor.machine.life = params.Dying
s.accessor.machine.SetErrors(
nil, // SetMachineAddresses
nil, // SetStatus
nil, // Watch
nil, // Refresh
nil, // SetStatus
&params.Error{Code: params.CodeMachineHasAttachedStorage},
)
worker := machiner.NewMachiner(s.accessor, s.agentConfig, false)
s.accessor.machine.watcher.changes <- struct{}{}
worker.Kill()
c.Check(worker.Wait(), jc.ErrorIsNil)
s.accessor.CheckCalls(c, []gitjujutesting.StubCall{{
FuncName: "Machine",
Args: []interface{}{s.agentConfig.Tag()},
}})
s.accessor.machine.watcher.CheckCalls(c, []gitjujutesting.StubCall{
{FuncName: "Changes"}, {FuncName: "Changes"}, {FuncName: "Stop"},
})
s.accessor.machine.CheckCalls(c, []gitjujutesting.StubCall{{
FuncName: "SetMachineAddresses",
Args: []interface{}{
network.NewAddresses(
"255.255.255.255",
"0.0.0.0",
),
},
}, {
FuncName: "SetStatus",
Args: []interface{}{
params.StatusStarted,
"",
map[string]interface{}(nil),
},
}, {
FuncName: "Watch",
}, {
FuncName: "Refresh",
}, {
FuncName: "Life",
}, {
FuncName: "SetStatus",
Args: []interface{}{
params.StatusStopped,
"",
map[string]interface{}(nil),
},
}, {
FuncName: "EnsureDead",
}})
}
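Example 1 ends with the Kill-then-Wait assertion that nearly every test below repeats. A tiny helper like the following (the name cleanKill is hypothetical, not taken from these examples; gc, jc and worker are the imports the examples already use) would factor it out:

// cleanKill stops a worker and asserts that it shut down without error.
func cleanKill(c *gc.C, w worker.Worker) {
	w.Kill()
	c.Check(w.Wait(), jc.ErrorIsNil)
}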
Example 2: TestUpdateEnvironConfig
func (s *storageProvisionerSuite) TestUpdateEnvironConfig(c *gc.C) {
volumeAccessor := newMockVolumeAccessor()
volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
s.provider.volumeSourceFunc = func(envConfig *config.Config, sourceConfig *storage.Config) (storage.VolumeSource, error) {
c.Assert(envConfig, gc.NotNil)
c.Assert(sourceConfig, gc.NotNil)
c.Assert(envConfig.AllAttrs()["foo"], gc.Equals, "bar")
return nil, errors.New("zinga")
}
args := &workerArgs{volumes: volumeAccessor}
worker := newStorageProvisioner(c, args)
defer worker.Wait()
defer worker.Kill()
newConfig, err := args.environ.cfg.Apply(map[string]interface{}{"foo": "bar"})
c.Assert(err, jc.ErrorIsNil)
args.environ.watcher.changes <- struct{}{}
args.environ.setConfig(newConfig)
args.environ.watcher.changes <- struct{}{}
args.volumes.volumesWatcher.changes <- []string{"1", "2"}
err = worker.Wait()
c.Assert(err, gc.ErrorMatches, `processing pending volumes: creating volumes: getting volume source: getting storage source "dummy": zinga`)
}
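Example 2 relies on defer ordering for teardown: defers run last-in, first-out, so registering Wait before Kill means Kill fires first when the function returns, and Wait then blocks until the worker has actually stopped. The skeleton below, reusing the Worker interface sketched above, shows the same idiom in isolation:

func runAndStop(w Worker) {
	defer w.Wait() // runs second: block until shutdown has completed
	defer w.Kill() // runs first: request shutdown
	// ... exercise the worker here ...
}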
Example 3: TestFilesystemAdded
func (s *storageProvisionerSuite) TestFilesystemAdded(c *gc.C) {
expectedFilesystems := []params.Filesystem{{
FilesystemTag: "filesystem-1",
Info: params.FilesystemInfo{
FilesystemId: "id-1",
Size: 1024,
},
}, {
FilesystemTag: "filesystem-2",
Info: params.FilesystemInfo{
FilesystemId: "id-2",
Size: 1024,
},
}}
filesystemInfoSet := make(chan interface{})
filesystemAccessor := newMockFilesystemAccessor()
filesystemAccessor.setFilesystemInfo = func(filesystems []params.Filesystem) ([]params.ErrorResult, error) {
defer close(filesystemInfoSet)
c.Assert(filesystems, jc.SameContents, expectedFilesystems)
return nil, nil
}
args := &workerArgs{filesystems: filesystemAccessor}
worker := newStorageProvisioner(c, args)
defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
defer worker.Kill()
// The worker should create filesystems according to ids "1" and "2".
filesystemAccessor.filesystemsWatcher.changes <- []string{"1", "2"}
// ... but not until the environment config is available.
assertNoEvent(c, filesystemInfoSet, "filesystem info set")
args.environ.watcher.changes <- struct{}{}
waitChannel(c, filesystemInfoSet, "waiting for filesystem info to be set")
}
Example 4: TestSetVolumeInfoErrorResultDoesNotStopWorker
func (s *storageProvisionerSuite) TestSetVolumeInfoErrorResultDoesNotStopWorker(c *gc.C) {
volumeAccessor := newMockVolumeAccessor()
volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) {
return []params.ErrorResult{{Error: &params.Error{Message: "message", Code: "code"}}}, nil
}
args := &workerArgs{volumes: volumeAccessor}
worker := newStorageProvisioner(c, args)
defer func() {
err := worker.Wait()
c.Assert(err, jc.ErrorIsNil)
}()
defer worker.Kill()
done := make(chan interface{})
go func() {
defer close(done)
worker.Wait()
}()
args.volumes.volumesWatcher.changes <- []string{"1"}
args.environ.watcher.changes <- struct{}{}
assertNoEvent(c, done, "worker exited")
}
Example 5: runWorker
// runWorker starts the supplied manifold's worker and communicates it back to the
// loop goroutine; waits for worker completion; and communicates any error encountered
// back to the loop goroutine. It must not be run on the loop goroutine.
func (engine *engine) runWorker(name string, delay time.Duration, start StartFunc, getResource GetResourceFunc) {
startWorkerAndWait := func() error {
logger.Infof("starting %q manifold worker in %s...", name, delay)
select {
case <-time.After(delay):
case <-engine.tomb.Dying():
logger.Debugf("not starting %q manifold worker (shutting down)", name)
return tomb.ErrDying
}
logger.Debugf("starting %q manifold worker", name)
worker, err := start(getResource)
if err != nil {
logger.Warningf("failed to start %q manifold worker: %v", name, err)
return err
}
logger.Debugf("running %q manifold worker", name)
select {
case <-engine.tomb.Dying():
logger.Debugf("stopping %q manifold worker (shutting down)", name)
worker.Kill()
case engine.started <- startedTicket{name, worker}:
logger.Debugf("registered %q manifold worker", name)
}
return worker.Wait()
}
// We may or may not send on started, but we *must* send on stopped.
engine.stopped <- stoppedTicket{name, startWorkerAndWait()}
}
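runWorker only drives the Kill/Wait contract; below is a sketch of the kind of worker it typically manages, where Kill and Wait delegate to a tomb that tracks the main loop goroutine. The gopkg.in/tomb.v2 import and the ticker type are assumptions for illustration only (juju's own workers may use a different tomb package).

package example

import (
	"time"

	"gopkg.in/tomb.v2"
)

// ticker is an illustrative worker: Kill and Wait delegate to the tomb.
type ticker struct {
	tomb tomb.Tomb
}

func NewTicker() *ticker {
	w := &ticker{}
	w.tomb.Go(w.loop)
	return w
}

// Kill asks the loop to stop; a nil reason means "stopped cleanly".
func (w *ticker) Kill() { w.tomb.Kill(nil) }

// Wait blocks until the loop has exited and returns its error.
func (w *ticker) Wait() error { return w.tomb.Wait() }

func (w *ticker) loop() error {
	for {
		select {
		case <-w.tomb.Dying():
			// Kill was called; exit cleanly.
			return tomb.ErrDying
		case <-time.After(time.Second):
			// ... periodic work would go here ...
		}
	}
}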
Example 6: upgradeWaiterWorker
// upgradeWaiterWorker runs the specified worker after upgrades have completed.
func (a *MachineAgent) upgradeWaiterWorker(start func() (worker.Worker, error)) worker.Worker {
return worker.NewSimpleWorker(func(stop <-chan struct{}) error {
// Wait for the upgrade to complete (or for us to be stopped).
select {
case <-stop:
return nil
case <-a.upgradeWorkerContext.UpgradeComplete:
}
// Upgrades are done, start the worker.
worker, err := start()
if err != nil {
return err
}
// Wait for worker to finish or for us to be stopped.
waitCh := make(chan error)
go func() {
waitCh <- worker.Wait()
}()
select {
case err := <-waitCh:
return err
case <-stop:
worker.Kill()
}
return <-waitCh // Ensure worker has stopped before returning.
})
}
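A hypothetical call site for upgradeWaiterWorker might look like the following; the newAPIWorker constructor and the deferred teardown are illustrative, not taken from the example.

// The wrapped worker only starts once upgrades have finished; killing the
// wrapper forwards the stop request to the inner worker, and Wait reports
// its exit error.
w := a.upgradeWaiterWorker(func() (worker.Worker, error) {
	return newAPIWorker(a) // assumed constructor for some inner worker
})
defer w.Wait()
defer w.Kill()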
Example 7: runWorker
// runWorker starts the supplied manifold's worker and communicates it back to the
// loop goroutine; waits for worker completion; and communicates any error encountered
// back to the loop goroutine. It must not be run on the loop goroutine.
func (engine *Engine) runWorker(name string, delay time.Duration, start StartFunc, context *context) {
errAborted := errors.New("aborted before delay elapsed")
startAfterDelay := func() (worker.Worker, error) {
// NOTE: the context will expire *after* the worker is started.
// This is tolerable because
// 1) we'll still correctly block access attempts most of the time
// 2) failing to block them won't cause data races anyway
// 3) it's not worth complicating the interface for every client just
// to eliminate the possibility of one harmlessly dumb interaction.
defer context.expire()
logger.Tracef("starting %q manifold worker in %s...", name, delay)
select {
case <-engine.tomb.Dying():
return nil, errAborted
case <-context.Abort():
return nil, errAborted
// TODO(fwereade): 2016-03-17 lp:1558657
case <-time.After(delay):
}
logger.Tracef("starting %q manifold worker", name)
return start(context)
}
startWorkerAndWait := func() error {
worker, err := startAfterDelay()
switch errors.Cause(err) {
case errAborted:
return nil
case nil:
logger.Tracef("running %q manifold worker", name)
default:
logger.Tracef("failed to start %q manifold worker: %v", name, err)
return err
}
select {
case <-engine.tomb.Dying():
logger.Tracef("stopping %q manifold worker (shutting down)", name)
// Doesn't matter whether worker == engine: if we're already Dying
// then cleanly Kill()ing ourselves again won't hurt anything.
worker.Kill()
case engine.started <- startedTicket{name, worker, context.accessLog}:
logger.Tracef("registered %q manifold worker", name)
}
if worker == engine {
// We mustn't Wait() for ourselves to complete here, or we'll
// deadlock. But we should wait until we're Dying, because we
// need this func to keep running to keep the self manifold
// accessible as a resource.
<-engine.tomb.Dying()
return tomb.ErrDying
}
return worker.Wait()
}
// We may or may not send on started, but we *must* send on stopped.
engine.stopped <- stoppedTicket{name, startWorkerAndWait(), context.accessLog}
}
Example 8: TestDestroyVolumes
func (s *storageProvisionerSuite) TestDestroyVolumes(c *gc.C) {
provisionedVolume := names.NewVolumeTag("1")
unprovisionedVolume := names.NewVolumeTag("2")
volumeAccessor := newMockVolumeAccessor()
volumeAccessor.provisionVolume(provisionedVolume)
life := func(tags []names.Tag) ([]params.LifeResult, error) {
results := make([]params.LifeResult, len(tags))
for i := range results {
results[i].Life = params.Dead
}
return results, nil
}
destroyedChan := make(chan interface{}, 1)
s.provider.destroyVolumesFunc = func(volumeIds []string) []error {
destroyedChan <- volumeIds
return make([]error, len(volumeIds))
}
removedChan := make(chan interface{}, 1)
remove := func(tags []names.Tag) ([]params.ErrorResult, error) {
removedChan <- tags
return make([]params.ErrorResult, len(tags)), nil
}
args := &workerArgs{
volumes: volumeAccessor,
life: &mockLifecycleManager{
life: life,
remove: remove,
},
}
worker := newStorageProvisioner(c, args)
defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
defer worker.Kill()
volumeAccessor.volumesWatcher.changes <- []string{
provisionedVolume.Id(),
unprovisionedVolume.Id(),
}
args.environ.watcher.changes <- struct{}{}
// Both volumes should be removed; the provisioned one
// should be deprovisioned first.
destroyed := waitChannel(c, destroyedChan, "waiting for volume to be deprovisioned")
assertNoEvent(c, destroyedChan, "volumes deprovisioned")
c.Assert(destroyed, jc.DeepEquals, []string{"vol-1"})
var removed []names.Tag
for len(removed) < 2 {
tags := waitChannel(c, removedChan, "waiting for volumes to be removed").([]names.Tag)
removed = append(removed, tags...)
}
c.Assert(removed, jc.SameContents, []names.Tag{provisionedVolume, unprovisionedVolume})
assertNoEvent(c, removedChan, "volumes removed")
}
Example 9: setupWorkerTest
func (s *ManifoldSuite) setupWorkerTest(c *gc.C) worker.Worker {
worker, err := s.manifold.Start(s.resources.Context())
c.Check(err, jc.ErrorIsNil)
s.AddCleanup(func(c *gc.C) {
worker.Kill()
err := worker.Wait()
c.Check(err, jc.ErrorIsNil)
})
return worker
}
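A hypothetical test built on the helper above (TestStartSuccess is an illustrative name): the cleanup registered in setupWorkerTest kills the worker and asserts a clean Wait when the test finishes.

func (s *ManifoldSuite) TestStartSuccess(c *gc.C) {
	w := s.setupWorkerTest(c)
	c.Assert(w, gc.NotNil)
}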
Example 10: TestCreateVolumeCreatesAttachment
func (s *storageProvisionerSuite) TestCreateVolumeCreatesAttachment(c *gc.C) {
volumeAccessor := newMockVolumeAccessor()
volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
volumeAttachmentInfoSet := make(chan interface{})
volumeAccessor.setVolumeAttachmentInfo = func(volumeAttachments []params.VolumeAttachment) ([]params.ErrorResult, error) {
defer close(volumeAttachmentInfoSet)
return make([]params.ErrorResult, len(volumeAttachments)), nil
}
s.provider.createVolumesFunc = func(args []storage.VolumeParams) ([]storage.CreateVolumesResult, error) {
volumeAccessor.provisionedAttachments[params.MachineStorageId{
MachineTag: args[0].Attachment.Machine.String(),
AttachmentTag: args[0].Attachment.Volume.String(),
}] = params.VolumeAttachment{
VolumeTag: args[0].Attachment.Volume.String(),
MachineTag: args[0].Attachment.Machine.String(),
}
return []storage.CreateVolumesResult{{
Volume: &storage.Volume{
Tag: args[0].Tag,
VolumeInfo: storage.VolumeInfo{
VolumeId: "vol-ume",
},
},
VolumeAttachment: &storage.VolumeAttachment{
Volume: args[0].Attachment.Volume,
Machine: args[0].Attachment.Machine,
},
}}, nil
}
attachVolumesCalled := make(chan interface{})
s.provider.attachVolumesFunc = func(args []storage.VolumeAttachmentParams) ([]storage.AttachVolumesResult, error) {
defer close(attachVolumesCalled)
return nil, errors.New("should not be called")
}
args := &workerArgs{volumes: volumeAccessor}
worker := newStorageProvisioner(c, args)
defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
defer worker.Kill()
volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{
MachineTag: "machine-1", AttachmentTag: "volume-1",
}}
assertNoEvent(c, volumeAttachmentInfoSet, "volume attachment set")
// The worker should create the volume with id "1".
volumeAccessor.volumesWatcher.changes <- []string{"1"}
args.environ.watcher.changes <- struct{}{}
waitChannel(c, volumeAttachmentInfoSet, "waiting for volume attachments to be set")
assertNoEvent(c, attachVolumesCalled, "AttachVolumes called")
}
Example 11: TestAttachVolumeBackedFilesystem
func (s *storageProvisionerSuite) TestAttachVolumeBackedFilesystem(c *gc.C) {
infoSet := make(chan interface{})
filesystemAccessor := newMockFilesystemAccessor()
filesystemAccessor.setFilesystemAttachmentInfo = func(attachments []params.FilesystemAttachment) ([]params.ErrorResult, error) {
infoSet <- attachments
return nil, nil
}
args := &workerArgs{
scope: names.NewMachineTag("0"),
filesystems: filesystemAccessor,
}
worker := newStorageProvisioner(c, args)
defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
defer worker.Kill()
filesystemAccessor.provisionedFilesystems["filesystem-0-0"] = params.Filesystem{
FilesystemTag: "filesystem-0-0",
VolumeTag: "volume-0-0",
Info: params.FilesystemInfo{
FilesystemId: "whatever",
Size: 123,
},
}
filesystemAccessor.provisionedMachines["machine-0"] = instance.Id("already-provisioned-0")
args.volumes.blockDevices[params.MachineStorageId{
MachineTag: "machine-0",
AttachmentTag: "volume-0-0",
}] = storage.BlockDevice{
DeviceName: "xvdf1",
Size: 123,
}
filesystemAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{
MachineTag: "machine-0",
AttachmentTag: "filesystem-0-0",
}}
assertNoEvent(c, infoSet, "filesystem attachment info set")
args.environ.watcher.changes <- struct{}{}
filesystemAccessor.filesystemsWatcher.changes <- []string{"0/0"}
info := waitChannel(
c, infoSet, "waiting for filesystem attachment info to be set",
).([]params.FilesystemAttachment)
c.Assert(info, jc.DeepEquals, []params.FilesystemAttachment{{
FilesystemTag: "filesystem-0-0",
MachineTag: "machine-0",
Info: params.FilesystemAttachmentInfo{
MountPoint: "/mnt/xvdf1",
ReadOnly: true,
},
}})
}
Example 12: TestStartStop
func (s *storageProvisionerSuite) TestStartStop(c *gc.C) {
worker := storageprovisioner.NewStorageProvisioner(
coretesting.EnvironmentTag,
"dir",
newMockVolumeAccessor(),
newMockFilesystemAccessor(),
&mockLifecycleManager{},
newMockEnvironAccessor(c),
newMockMachineAccessor(c),
)
worker.Kill()
c.Assert(worker.Wait(), gc.IsNil)
}
Example 13: setupWorkerTest
func (s *ManifoldSuite) setupWorkerTest(c *gc.C) worker.Worker {
worker, err := s.manifold.Start(s.getResource)
c.Check(err, jc.ErrorIsNil)
s.AddCleanup(func(c *gc.C) {
worker.Kill()
err := worker.Wait()
c.Check(err, jc.ErrorIsNil)
})
s.CheckCalls(c, []testing.StubCall{{
FuncName: "createLock",
Args: []interface{}{"/path/to/data/dir"},
}})
return worker
}
Example 14: TestDestroyFilesystems
func (s *storageProvisionerSuite) TestDestroyFilesystems(c *gc.C) {
provisionedFilesystem := names.NewFilesystemTag("1")
unprovisionedFilesystem := names.NewFilesystemTag("2")
filesystemAccessor := newMockFilesystemAccessor()
filesystemAccessor.provisionFilesystem(provisionedFilesystem)
life := func(tags []names.Tag) ([]params.LifeResult, error) {
results := make([]params.LifeResult, len(tags))
for i := range results {
results[i].Life = params.Dead
}
return results, nil
}
removedChan := make(chan interface{}, 1)
remove := func(tags []names.Tag) ([]params.ErrorResult, error) {
removedChan <- tags
return make([]params.ErrorResult, len(tags)), nil
}
args := &workerArgs{
filesystems: filesystemAccessor,
life: &mockLifecycleManager{
life: life,
remove: remove,
},
}
worker := newStorageProvisioner(c, args)
defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
defer worker.Kill()
filesystemAccessor.filesystemsWatcher.changes <- []string{
provisionedFilesystem.Id(),
unprovisionedFilesystem.Id(),
}
args.environ.watcher.changes <- struct{}{}
// Both filesystems should be removed; the provisioned one
// *should* be deprovisioned first, but we don't currently
// have the ability to do so via the storage provider API.
var removed []names.Tag
for len(removed) < 2 {
tags := waitChannel(c, removedChan, "waiting for filesystems to be removed").([]names.Tag)
removed = append(removed, tags...)
}
c.Assert(removed, jc.SameContents, []names.Tag{provisionedFilesystem, unprovisionedFilesystem})
assertNoEvent(c, removedChan, "filesystems removed")
}
Example 15: TestStatusWorkerStarts
// TestStatusWorkerStarts ensures that the manifold correctly sets up the connected worker.
func (s *PatchedManifoldSuite) TestStatusWorkerStarts(c *gc.C) {
var called bool
s.manifoldConfig.NewConnectedStatusWorker = func(cfg meterstatus.ConnectedConfig) (worker.Worker, error) {
called = true
return meterstatus.NewConnectedStatusWorker(cfg)
}
manifold := meterstatus.Manifold(s.manifoldConfig)
worker, err := manifold.Start(s.resources.Context())
c.Assert(called, jc.IsTrue)
c.Assert(err, jc.ErrorIsNil)
c.Assert(worker, gc.NotNil)
worker.Kill()
err = worker.Wait()
c.Assert(err, jc.ErrorIsNil)
s.stub.CheckCallNames(c, "MeterStatus", "RunHook", "WatchMeterStatus")
}