This article collects typical usage examples of the Golang function ChanOutput from the package github.com/docker/docker/pkg/progress. If you have been wondering what ChanOutput does, how to call it, or what real-world usage looks like, the curated examples below should help.
The following 15 code examples of the ChanOutput function are shown, sorted by popularity by default.
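All of the examples share one pattern: wrap a chan progress.Progress in progress.ChanOutput to obtain a progress.Output that transfer code writes to, drain the channel in a consumer goroutine, and finally close the channel and wait for the consumer to finish. Before the real code, here is a minimal, self-contained sketch of that pattern. It is not taken from Docker itself; the main function, the "layer1" ID, and the simulated work loop are invented for illustration, while the progress types and the ChanOutput call follow the package as the examples below use it.

package main

import (
    "fmt"
    "time"

    "github.com/docker/docker/pkg/progress"
)

func main() {
    // A buffer decouples the producer from a slow consumer, which is why
    // the daemon examples below use make(chan progress.Progress, 100).
    progressChan := make(chan progress.Progress, 100)
    progressDone := make(chan struct{})

    // Consumer: drain the channel until the producer closes it.
    go func() {
        for p := range progressChan {
            fmt.Printf("%s: %s %d/%d\n", p.ID, p.Action, p.Current, p.Total)
        }
        close(progressDone)
    }()

    // ChanOutput adapts the channel to the progress.Output interface.
    out := progress.ChanOutput(progressChan)
    for i := int64(0); i <= 10; i++ {
        // WriteProgress is the single method of progress.Output; with
        // ChanOutput it sends the Progress value on the channel.
        _ = out.WriteProgress(progress.Progress{ID: "layer1", Action: "downloading", Current: i, Total: 10})
        time.Sleep(10 * time.Millisecond) // stand-in for real transfer work
    }

    // Shutdown handshake used throughout the examples: close the channel,
    // then wait until the consumer has processed everything.
    close(progressChan)
    <-progressDone
}

Note the order of the last two lines: closing progressChan ends the consumer's range loop, and waiting on progressDone guarantees all progress output has been handled before returning.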
Example 1: pullImageWithReference
func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
    // Include a buffer so that slow client connections don't affect
    // transfer performance.
    progressChan := make(chan progress.Progress, 100)

    writesDone := make(chan struct{})

    ctx, cancelFunc := context.WithCancel(ctx)

    // Forward progress messages to the client's output stream; the writer
    // cancels the pull context if writing to the client fails.
    go func() {
        writeDistributionProgress(cancelFunc, outStream, progressChan)
        close(writesDone)
    }()

    imagePullConfig := &distribution.ImagePullConfig{
        MetaHeaders:      metaHeaders,
        AuthConfig:       authConfig,
        ProgressOutput:   progress.ChanOutput(progressChan),
        RegistryService:  daemon.RegistryService,
        ImageEventLogger: daemon.LogImageEvent,
        MetadataStore:    daemon.distributionMetadataStore,
        ImageStore:       daemon.imageStore,
        ReferenceStore:   daemon.referenceStore,
        DownloadManager:  daemon.downloadManager,
    }

    err := distribution.Pull(ctx, ref, imagePullConfig)

    // Close the channel, then wait for the writer goroutine to drain it.
    close(progressChan)
    <-writesDone
    return err
}
Example 2: TestCancelledUpload
func TestCancelledUpload(t *testing.T) {
    lum := NewLayerUploadManager(maxUploadConcurrency, func(m *LayerUploadManager) { m.waitDuration = time.Millisecond })

    progressChan := make(chan progress.Progress)
    progressDone := make(chan struct{})

    go func() {
        for range progressChan {
        }
        close(progressDone)
    }()

    ctx, cancel := context.WithCancel(context.Background())

    go func() {
        <-time.After(time.Millisecond)
        cancel()
    }()

    descriptors := uploadDescriptors(nil)

    err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan))
    if err != context.Canceled {
        t.Fatal("expected upload to be cancelled")
    }

    close(progressChan)
    <-progressDone
}
Example 3: pull
func (pm *Manager) pull(ctx context.Context, ref reference.Named, config *distribution.ImagePullConfig, outStream io.Writer) error {
    if outStream != nil {
        // Include a buffer so that slow client connections don't affect
        // transfer performance.
        progressChan := make(chan progress.Progress, 100)
        writesDone := make(chan struct{})
        defer func() {
            close(progressChan)
            <-writesDone
        }()

        var cancelFunc context.CancelFunc
        ctx, cancelFunc = context.WithCancel(ctx)

        go func() {
            progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
            close(writesDone)
        }()

        config.ProgressOutput = progress.ChanOutput(progressChan)
    } else {
        config.ProgressOutput = progress.DiscardOutput()
    }

    return distribution.Pull(ctx, ref, config)
}
Example 4: TestSuccessfulUpload
func TestSuccessfulUpload(t *testing.T) {
    lum := NewLayerUploadManager(maxUploadConcurrency, func(m *LayerUploadManager) { m.waitDuration = time.Millisecond })

    progressChan := make(chan progress.Progress)
    progressDone := make(chan struct{})
    receivedProgress := make(map[string]int64)

    go func() {
        for p := range progressChan {
            receivedProgress[p.ID] = p.Current
        }
        close(progressDone)
    }()

    var currentUploads int32
    descriptors := uploadDescriptors(&currentUploads)

    err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan))
    if err != nil {
        t.Fatalf("upload error: %v", err)
    }

    close(progressChan)
    <-progressDone
}
Example 5: TestCancelledDownload
func TestCancelledDownload(t *testing.T) {
    ldm := NewLayerDownloadManager(&mockLayerStore{make(map[layer.ChainID]*mockLayer)}, maxDownloadConcurrency)

    progressChan := make(chan progress.Progress)
    progressDone := make(chan struct{})

    go func() {
        for range progressChan {
        }
        close(progressDone)
    }()

    ctx, cancel := context.WithCancel(context.Background())

    go func() {
        <-time.After(time.Millisecond)
        cancel()
    }()

    descriptors := downloadDescriptors(nil)

    _, _, err := ldm.Download(ctx, *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan))
    if err != context.Canceled {
        t.Fatal("expected download to be cancelled")
    }

    close(progressChan)
    <-progressDone
}
Example 6: PushImage
// PushImage initiates a push operation on the repository named localName.
func (daemon *Daemon) PushImage(ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
    // Include a buffer so that slow client connections don't affect
    // transfer performance.
    progressChan := make(chan progress.Progress, 100)

    writesDone := make(chan struct{})

    ctx, cancelFunc := context.WithCancel(context.Background())

    go func() {
        writeDistributionProgress(cancelFunc, outStream, progressChan)
        close(writesDone)
    }()

    imagePushConfig := &distribution.ImagePushConfig{
        MetaHeaders:     metaHeaders,
        AuthConfig:      authConfig,
        ProgressOutput:  progress.ChanOutput(progressChan),
        RegistryService: daemon.RegistryService,
        EventsService:   daemon.EventsService,
        MetadataStore:   daemon.distributionMetadataStore,
        LayerStore:      daemon.layerStore,
        ImageStore:      daemon.imageStore,
        TagStore:        daemon.tagStore,
        TrustKey:        daemon.trustKey,
        UploadManager:   daemon.uploadManager,
    }

    err := distribution.Push(ctx, ref, imagePushConfig)
    close(progressChan)
    <-writesDone
    return err
}
Example 7: TestTransfer
func TestTransfer(t *testing.T) {
    makeXferFunc := func(id string) DoFunc {
        return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
            select {
            case <-start:
            default:
                t.Fatalf("transfer function not started even though concurrency limit not reached")
            }

            xfer := NewTransfer()
            go func() {
                for i := 0; i <= 10; i++ {
                    progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10}
                    time.Sleep(10 * time.Millisecond)
                }
                close(progressChan)
            }()
            return xfer
        }
    }

    tm := NewTransferManager(5)

    progressChan := make(chan progress.Progress)
    progressDone := make(chan struct{})
    receivedProgress := make(map[string]int64)

    go func() {
        for p := range progressChan {
            val, present := receivedProgress[p.ID]
            if present && p.Current <= val {
                t.Fatalf("got unexpected progress value: %d (expected %d)", p.Current, val+1)
            }
            receivedProgress[p.ID] = p.Current
        }
        close(progressDone)
    }()

    // Start a few transfers
    ids := []string{"id1", "id2", "id3"}
    xfers := make([]Transfer, len(ids))
    watchers := make([]*Watcher, len(ids))
    for i, id := range ids {
        xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan))
    }

    for i, xfer := range xfers {
        <-xfer.Done()
        xfer.Release(watchers[i])
    }

    close(progressChan)
    <-progressDone

    for _, id := range ids {
        if receivedProgress[id] != 10 {
            t.Fatalf("final progress value %d instead of 10", receivedProgress[id])
        }
    }
}
Example 8: TestConcurrencyLimit
func TestConcurrencyLimit(t *testing.T) {
    concurrencyLimit := 3
    var runningJobs int32

    makeXferFunc := func(id string) DoFunc {
        return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
            xfer := NewTransfer()
            go func() {
                <-start
                totalJobs := atomic.AddInt32(&runningJobs, 1)
                if int(totalJobs) > concurrencyLimit {
                    t.Fatalf("too many jobs running")
                }
                for i := 0; i <= 10; i++ {
                    progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10}
                    time.Sleep(10 * time.Millisecond)
                }
                atomic.AddInt32(&runningJobs, -1)
                close(progressChan)
            }()
            return xfer
        }
    }

    tm := NewTransferManager(concurrencyLimit)

    progressChan := make(chan progress.Progress)
    progressDone := make(chan struct{})
    receivedProgress := make(map[string]int64)

    go func() {
        for p := range progressChan {
            receivedProgress[p.ID] = p.Current
        }
        close(progressDone)
    }()

    // Start more transfers than the concurrency limit
    ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"}
    xfers := make([]Transfer, len(ids))
    watchers := make([]*Watcher, len(ids))
    for i, id := range ids {
        xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan))
    }

    for i, xfer := range xfers {
        <-xfer.Done()
        xfer.Release(watchers[i])
    }

    close(progressChan)
    <-progressDone

    for _, id := range ids {
        if receivedProgress[id] != 10 {
            t.Fatalf("final progress value %d instead of 10", receivedProgress[id])
        }
    }
}
Example 9: TestWatchFinishedTransfer
func TestWatchFinishedTransfer(t *testing.T) {
    makeXferFunc := func(id string) DoFunc {
        return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
            xfer := NewTransfer()
            go func() {
                // Finish immediately
                close(progressChan)
            }()
            return xfer
        }
    }

    tm := NewTransferManager(5)

    // Start a transfer
    watchers := make([]*Watcher, 3)
    var xfer Transfer
    xfer, watchers[0] = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(make(chan progress.Progress)))

    // Give it a watcher immediately
    watchers[1] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress)))

    // Wait for the transfer to complete
    <-xfer.Done()

    // Set up another watcher
    watchers[2] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress)))

    // Release the watchers
    for _, w := range watchers {
        xfer.Release(w)
    }

    // Now that all watchers have been released, Released() should
    // return a closed channel.
    <-xfer.Released()
}
Example 10: PushImage
// PushImage initiates a push operation on the repository named localName.
func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
    ref, err := reference.ParseNamed(image)
    if err != nil {
        return err
    }

    if tag != "" {
        // Push by digest is not supported, so only tags are supported.
        ref, err = reference.WithTag(ref, tag)
        if err != nil {
            return err
        }
    }

    // Include a buffer so that slow client connections don't affect
    // transfer performance.
    progressChan := make(chan progress.Progress, 100)

    writesDone := make(chan struct{})

    ctx, cancelFunc := context.WithCancel(ctx)

    go func() {
        writeDistributionProgress(cancelFunc, outStream, progressChan)
        close(writesDone)
    }()

    imagePushConfig := &distribution.ImagePushConfig{
        Config: distribution.Config{
            MetaHeaders:      metaHeaders,
            AuthConfig:       authConfig,
            ProgressOutput:   progress.ChanOutput(progressChan),
            RegistryService:  daemon.RegistryService,
            ImageEventLogger: daemon.LogImageEvent,
            MetadataStore:    daemon.distributionMetadataStore,
            ImageStore:       distribution.NewImageConfigStoreFromStore(daemon.imageStore),
            ReferenceStore:   daemon.referenceStore,
        },
        ConfigMediaType: schema2.MediaTypeImageConfig,
        LayerStore:      distribution.NewLayerProviderFromStore(daemon.layerStore),
        TrustKey:        daemon.trustKey,
        UploadManager:   daemon.uploadManager,
    }

    err = distribution.Push(ctx, ref, imagePushConfig)
    close(progressChan)
    <-writesDone
    return err
}
Example 11: TestSuccessfulUpload
func TestSuccessfulUpload(t *testing.T) {
    lum := NewLayerUploadManager(maxUploadConcurrency)

    progressChan := make(chan progress.Progress)
    progressDone := make(chan struct{})
    receivedProgress := make(map[string]int64)

    go func() {
        for p := range progressChan {
            receivedProgress[p.ID] = p.Current
        }
        close(progressDone)
    }()

    var currentUploads int32
    descriptors := uploadDescriptors(&currentUploads)

    digests, err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan))
    if err != nil {
        t.Fatalf("upload error: %v", err)
    }

    close(progressChan)
    <-progressDone

    if len(digests) != len(expectedDigests) {
        t.Fatal("wrong number of keys in digests map")
    }

    for key, val := range expectedDigests {
        if digests[key] != val {
            t.Fatalf("mismatch in digest array for key %v (expected %v, got %v)", key, val, digests[key])
        }
        if receivedProgress[key.String()] != 10 {
            t.Fatalf("missing or wrong progress output for %v", key)
        }
    }
}
Example 12: TestDuplicateTransfer
func TestDuplicateTransfer(t *testing.T) {
    ready := make(chan struct{})

    var xferFuncCalls int32
    makeXferFunc := func(id string) DoFunc {
        return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
            atomic.AddInt32(&xferFuncCalls, 1)
            xfer := NewTransfer()
            go func() {
                defer func() {
                    close(progressChan)
                }()
                <-ready
                for i := int64(0); ; i++ {
                    select {
                    case <-time.After(10 * time.Millisecond):
                    case <-xfer.Context().Done():
                        return
                    }
                    progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10}
                }
            }()
            return xfer
        }
    }

    tm := NewTransferManager(5)

    type transferInfo struct {
        xfer                  Transfer
        watcher               *Watcher
        progressChan          chan progress.Progress
        progressDone          chan struct{}
        receivedFirstProgress chan struct{}
    }

    progressConsumer := func(t transferInfo) {
        first := true
        for range t.progressChan {
            if first {
                close(t.receivedFirstProgress)
            }
            first = false
        }
        close(t.progressDone)
    }

    // Try to start multiple transfers with the same ID
    transfers := make([]transferInfo, 5)
    for i := range transfers {
        t := &transfers[i]
        t.progressChan = make(chan progress.Progress)
        t.progressDone = make(chan struct{})
        t.receivedFirstProgress = make(chan struct{})
        t.xfer, t.watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(t.progressChan))
        go progressConsumer(*t)
    }

    // Allow the transfer goroutine to proceed.
    close(ready)

    // Confirm that each watcher gets progress output.
    for _, t := range transfers {
        <-t.receivedFirstProgress
    }

    // Confirm that the transfer function was called exactly once.
    if xferFuncCalls != 1 {
        t.Fatal("transfer function wasn't called exactly once")
    }

    // Release one watcher every 5ms
    for _, t := range transfers {
        t.xfer.Release(t.watcher)
        <-time.After(5 * time.Millisecond)
    }

    for _, t := range transfers {
        // Now that all watchers have been released, Released() should
        // return a closed channel.
        <-t.xfer.Released()
        // Done() should return a closed channel because the xfer func returned
        // due to cancellation.
        <-t.xfer.Done()
    }

    for _, t := range transfers {
        close(t.progressChan)
        <-t.progressDone
    }
}
Example 13: TestWatchRelease
func TestWatchRelease(t *testing.T) {
    ready := make(chan struct{})

    makeXferFunc := func(id string) DoFunc {
        return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
            xfer := NewTransfer()
            go func() {
                defer func() {
                    close(progressChan)
                }()
                <-ready
                for i := int64(0); ; i++ {
                    select {
                    case <-time.After(10 * time.Millisecond):
                    case <-xfer.Context().Done():
                        return
                    }
                    progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10}
                }
            }()
            return xfer
        }
    }

    tm := NewTransferManager(5)

    type watcherInfo struct {
        watcher               *Watcher
        progressChan          chan progress.Progress
        progressDone          chan struct{}
        receivedFirstProgress chan struct{}
    }

    progressConsumer := func(w watcherInfo) {
        first := true
        for range w.progressChan {
            if first {
                close(w.receivedFirstProgress)
            }
            first = false
        }
        close(w.progressDone)
    }

    // Start a transfer
    watchers := make([]watcherInfo, 5)
    var xfer Transfer
    watchers[0].progressChan = make(chan progress.Progress)
    watchers[0].progressDone = make(chan struct{})
    watchers[0].receivedFirstProgress = make(chan struct{})
    xfer, watchers[0].watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(watchers[0].progressChan))
    go progressConsumer(watchers[0])

    // Give it multiple watchers
    for i := 1; i != len(watchers); i++ {
        watchers[i].progressChan = make(chan progress.Progress)
        watchers[i].progressDone = make(chan struct{})
        watchers[i].receivedFirstProgress = make(chan struct{})
        watchers[i].watcher = xfer.Watch(progress.ChanOutput(watchers[i].progressChan))
        go progressConsumer(watchers[i])
    }

    // Now that the watchers are set up, allow the transfer goroutine to
    // proceed.
    close(ready)

    // Confirm that each watcher gets progress output.
    for _, w := range watchers {
        <-w.receivedFirstProgress
    }

    // Release one watcher every 5ms
    for _, w := range watchers {
        xfer.Release(w.watcher)
        <-time.After(5 * time.Millisecond)
    }

    // Now that all watchers have been released, Released() should
    // return a closed channel.
    <-xfer.Released()

    // Done() should return a closed channel because the xfer func returned
    // due to cancellation.
    <-xfer.Done()

    for _, w := range watchers {
        close(w.progressChan)
        <-w.progressDone
    }
}
Example 14: Push
// Push pushes a plugin to the store.
func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, outStream io.Writer) error {
    p, err := pm.config.Store.GetV2Plugin(name)
    if err != nil {
        return err
    }

    ref, err := reference.ParseNamed(p.Name())
    if err != nil {
        return errors.Wrapf(err, "plugin has invalid name %v for push", p.Name())
    }

    var po progress.Output
    if outStream != nil {
        // Include a buffer so that slow client connections don't affect
        // transfer performance.
        progressChan := make(chan progress.Progress, 100)
        writesDone := make(chan struct{})
        defer func() {
            close(progressChan)
            <-writesDone
        }()

        var cancelFunc context.CancelFunc
        ctx, cancelFunc = context.WithCancel(ctx)

        go func() {
            progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
            close(writesDone)
        }()

        po = progress.ChanOutput(progressChan)
    } else {
        po = progress.DiscardOutput()
    }

    // TODO: replace these with manager
    is := &pluginConfigStore{
        pm:     pm,
        plugin: p,
    }
    ls := &pluginLayerProvider{
        pm:     pm,
        plugin: p,
    }
    rs := &pluginReference{
        name:     ref,
        pluginID: p.Config,
    }

    uploadManager := xfer.NewLayerUploadManager(3)

    imagePushConfig := &distribution.ImagePushConfig{
        Config: distribution.Config{
            MetaHeaders:      metaHeader,
            AuthConfig:       authConfig,
            ProgressOutput:   po,
            RegistryService:  pm.config.RegistryService,
            ReferenceStore:   rs,
            ImageEventLogger: pm.config.LogPluginEvent,
            ImageStore:       is,
            RequireSchema2:   true,
        },
        ConfigMediaType: schema2.MediaTypePluginConfig,
        LayerStore:      ls,
        UploadManager:   uploadManager,
    }

    return distribution.Push(ctx, ref, imagePushConfig)
}
Example 15: makeDownloadFunc
// makeDownloadFunc returns a func used by xfer.TransferManager to download a layer
func (ldm *LayerDownloader) makeDownloadFunc(layer *ImageWithMeta, ic *ImageC, parentDownload *downloadTransfer, layers []*ImageWithMeta) xfer.DoFunc {
    return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) xfer.Transfer {
        d := &downloadTransfer{
            Transfer: xfer.NewTransfer(),
            layer:    layer,
        }

        go func() {
            defer func() {
                close(progressChan)

                // remove layer from cache if there was an error attempting to download
                if d.err != nil {
                    LayerCache().Remove(layer.ID)
                }
            }()

            progressOutput := progress.ChanOutput(progressChan)

            // wait for TransferManager to give the go-ahead
            select {
            case <-start:
            default:
                progress.Update(progressOutput, layer.String(), "Waiting")
                <-start
            }

            if parentDownload != nil {
                // bail if parent download failed or was cancelled
                select {
                case <-parentDownload.Done():
                    if err := parentDownload.result(); err != nil {
                        d.err = err
                        return
                    }
                default:
                }
            }

            // fetch blob
            diffID, err := FetchImageBlob(d.Transfer.Context(), ic.Options, layer, progressOutput)
            if err != nil {
                d.err = fmt.Errorf("%s/%s returned %s", ic.Image, layer.ID, err)
                return
            }

            layer.DiffID = diffID

            close(inactive)

            if parentDownload != nil {
                select {
                case <-d.Transfer.Context().Done():
                    d.err = errors.New("layer download cancelled")
                    return
                default:
                    <-parentDownload.Done() // block until parent download completes
                }

                if err := parentDownload.result(); err != nil {
                    d.err = err
                    return
                }
            }

            // is this the leaf layer?
            imageLayer := layer.ID == layers[0].ID

            // if this is the leaf layer, we are done and can now create the image config
            if imageLayer {
                imageConfig, err := ic.CreateImageConfig(layers)
                if err != nil {
                    d.err = err
                    return
                }

                // cache and persist the image
                cache.ImageCache().Add(&imageConfig)
                cache.ImageCache().Save()

                // place calculated ImageID in struct
                ic.ImageID = imageConfig.ImageID

                if err = updateRepositoryCache(ic); err != nil {
                    d.err = err
                    return
                }
            }

            ldm.m.Lock()
            defer ldm.m.Unlock()

            // Write blob to the storage layer
            if err := ic.WriteImageBlob(layer, progressOutput, imageLayer); err != nil {
                d.err = err
                return
                //......... the remainder of this code is omitted here .........