This article collects typical usage examples of the Session.GetRemoteHistory method from the Golang package github.com/docker/docker/registry. If you are wondering what Session.GetRemoteHistory does or how to call it, the curated code examples below may help. You can also explore further usage examples of the type the method belongs to, github.com/docker/docker/registry.Session.
Three code examples of the Session.GetRemoteHistory method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
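Before the full examples, here is a minimal sketch of a direct call to Session.GetRemoteHistory. The session, endpoint and token values are placeholders that would normally come from registry.NewSession and Session.GetRepositoryData (as in Example 3 below); the helper name printAncestry is introduced here for illustration only, and the return value is assumed to be the image's ancestry as a slice of image IDs, which is how the examples below consume it.

package example

import (
    "fmt"

    "github.com/docker/docker/registry"
)

// printAncestry lists the ancestry (chain of image IDs) of imgID as returned
// by the v1 registry endpoint. endpoint and token are assumed to come from a
// prior Session.GetRepositoryData call, as in Example 3 below.
func printAncestry(session *registry.Session, imgID, endpoint string, token []string) error {
    history, err := session.GetRemoteHistory(imgID, endpoint, token)
    if err != nil {
        return err
    }
    for _, id := range history {
        fmt.Println(id)
    }
    return nil
}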
Example 1: pullAndMergeImage
func (s *TagStore) pullAndMergeImage(r *registry.Session, out io.Writer, containerID, containerImage, imgID, endpoint string, token []string, sf *utils.StreamFormatter) (bool, error) {
    newHistory, err := r.GetRemoteHistory(imgID, endpoint, token)
    if err != nil {
        return false, err
    }
    oldHistory, err := r.GetRemoteHistory(containerImage, endpoint, token)
    if err != nil {
        return false, err
    }
    // Compare the differences between the two images
    compareHistory := make(map[string]string, len(oldHistory))
    for _, id := range oldHistory {
        compareHistory[id] = id
    }
    var history []string
    for _, id := range newHistory {
        if _, ok := compareHistory[id]; !ok {
            history = append(history, id)
        }
    }

    layers_downloaded := false
    for i := len(history) - 1; i >= 0; i-- {
        id := history[i]

        // ensure no two downloads of the same layer happen at the same time
        if c, err := s.poolAdd("pull", "layer:"+id); err != nil {
            log.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
            <-c
        }
        defer s.poolRemove("pull", "layer:"+id)

        out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
        var (
            imgJSON []byte
            imgSize int
            err     error
            img     *image.Image
        )
        retries := 5
        for j := 1; j <= retries; j++ {
            imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token)
            if err != nil && j == retries {
                out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
                return layers_downloaded, err
            } else if err != nil {
                time.Sleep(time.Duration(j) * 500 * time.Millisecond)
                continue
            }
            img, err = image.NewImgJSON(imgJSON)
            layers_downloaded = true
            if err != nil && j == retries {
                out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
                return layers_downloaded, fmt.Errorf("Failed to parse json: %s", err)
            } else if err != nil {
                time.Sleep(time.Duration(j) * 500 * time.Millisecond)
                continue
            } else {
                break
            }
        }

        for j := 1; j <= retries; j++ {
            // Get the layer
            status := "Pulling fs layer"
            if j > 1 {
                status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
            }
            out.Write(sf.FormatProgress(utils.TruncateID(id), status, nil))
            layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize))
            if uerr, ok := err.(*url.Error); ok {
                err = uerr.Err
            }
            if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
                time.Sleep(time.Duration(j) * 500 * time.Millisecond)
                continue
            } else if err != nil {
                out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
                return layers_downloaded, err
            }
            layers_downloaded = true
            defer layer.Close()

            if !s.graph.Exists(id) {
                // register the layer the first time it is pulled
                err = s.graph.Register(img, imgJSON,
                    utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"))
                if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
                    time.Sleep(time.Duration(j) * 500 * time.Millisecond)
                    continue
                } else if err != nil {
                    out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
                    return layers_downloaded, err
                }
            }

            // apply the layer to the container's rootfs
            dest := path.Join(s.graph.Driver().MountPath(), containerID, "rootfs")
            out.Write(sf.FormatProgress(utils.TruncateID(id), fmt.Sprintf("Merge layer to container rootfs %s", dest), nil))
            err = archive.ApplyLayer(dest, layer)
            if err != nil && j == retries {
                out.Write(sf.FormatProgress(utils.TruncateID(id), "Error merge layers", nil))
                //......... (remaining code omitted) .........
Example 2: pullImage
func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) (bool, error) {
    history, err := r.GetRemoteHistory(imgID, endpoint, token)
    if err != nil {
        return false, err
    }
    out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling dependent layers", nil))
    // FIXME: Try to stream the images?
    // FIXME: Launch the getRemoteImage() in goroutines

    layers_downloaded := false
    for i := len(history) - 1; i >= 0; i-- {
        id := history[i]

        // ensure no two downloads of the same layer happen at the same time
        if c, err := s.poolAdd("pull", "layer:"+id); err != nil {
            log.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
            <-c
        }
        defer s.poolRemove("pull", "layer:"+id)

        if !s.graph.Exists(id) {
            out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
            var (
                imgJSON []byte
                imgSize int
                err     error
                img     *image.Image
            )
            retries := 5
            for j := 1; j <= retries; j++ {
                imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token)
                if err != nil && j == retries {
                    out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
                    return layers_downloaded, err
                } else if err != nil {
                    time.Sleep(time.Duration(j) * 500 * time.Millisecond)
                    continue
                }
                img, err = image.NewImgJSON(imgJSON)
                layers_downloaded = true
                if err != nil && j == retries {
                    out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
                    return layers_downloaded, fmt.Errorf("Failed to parse json: %s", err)
                } else if err != nil {
                    time.Sleep(time.Duration(j) * 500 * time.Millisecond)
                    continue
                } else {
                    break
                }
            }

            for j := 1; j <= retries; j++ {
                // Get the layer
                status := "Pulling fs layer"
                if j > 1 {
                    status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
                }
                out.Write(sf.FormatProgress(utils.TruncateID(id), status, nil))
                layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize))
                if uerr, ok := err.(*url.Error); ok {
                    err = uerr.Err
                }
                if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
                    time.Sleep(time.Duration(j) * 500 * time.Millisecond)
                    continue
                } else if err != nil {
                    out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
                    return layers_downloaded, err
                }
                layers_downloaded = true
                defer layer.Close()

                err = s.graph.Register(img,
                    utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"))
                if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
                    time.Sleep(time.Duration(j) * 500 * time.Millisecond)
                    continue
                } else if err != nil {
                    out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
                    return layers_downloaded, err
                } else {
                    break
                }
            }
        }
        out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil))
    }
    return layers_downloaded, nil
}
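Examples 1 and 2 wrap every registry call in the same pattern: up to five attempts with a linear back-off of j*500ms after the j-th failure, retrying unconditionally on metadata errors and only on network timeouts when streaming the layer. The following standalone sketch isolates just that back-off loop; the withRetries helper and its fetch callback are hypothetical names introduced here for illustration and are not part of the Docker code above.

package example

import "time"

// withRetries runs fetch up to `retries` times, sleeping j*500ms after the
// j-th failed attempt, mirroring the retry loops in Examples 1 and 2.
func withRetries(retries int, fetch func() error) error {
    var err error
    for j := 1; j <= retries; j++ {
        if err = fetch(); err == nil {
            return nil
        }
        if j < retries {
            time.Sleep(time.Duration(j) * 500 * time.Millisecond)
        }
    }
    return err
}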
Example 3: main
// TODO rewrite this whole PoC
func main() {
    flag.Usage = func() {
        flag.PrintDefaults()
    }
    flag.Parse()

    if debug {
        os.Setenv("DEBUG", "1")
        log.SetLevel(log.DebugLevel)
    }
    if flag.NArg() == 0 {
        fmt.Println("ERROR: no image names provided")
        flag.Usage()
        os.Exit(1)
    }

    // make tempDir
    tempDir, err := ioutil.TempDir("", "docker-fetch-")
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    defer os.RemoveAll(tempDir)

    fetcher := NewFetcher(tempDir)
    sc := registry.NewServiceConfig(rOptions)

    for _, arg := range flag.Args() {
        remote, tagName := parsers.ParseRepositoryTag(arg)
        if tagName == "" {
            tagName = "latest"
        }

        repInfo, err := sc.NewRepositoryInfo(remote)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        log.Debugf("%#v %q\n", repInfo, tagName)

        idx, err := repInfo.GetEndpoint()
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Fprintf(os.Stderr, "Pulling %s:%s from %s\n", repInfo.RemoteName, tagName, idx)

        var session *registry.Session
        if s, ok := fetcher.sessions[idx.String()]; ok {
            session = s
        } else {
            // TODO(vbatts) obviously the auth and http factory shouldn't be nil here
            session, err = registry.NewSession(nil, nil, idx, timeout)
            if err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
            }
        }

        rd, err := session.GetRepositoryData(repInfo.RemoteName)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        log.Debugf("rd: %#v", rd)

        // produce the "repositories" file for the archive
        if _, ok := fetcher.repositories[repInfo.RemoteName]; !ok {
            fetcher.repositories[repInfo.RemoteName] = graph.Repository{}
        }
        log.Debugf("repositories: %#v", fetcher.repositories)

        if len(rd.Endpoints) == 0 {
            log.Fatalf("expected registry endpoints, but received none from the index")
        }

        tags, err := session.GetRemoteTags(rd.Endpoints, repInfo.RemoteName, rd.Tokens)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        if hash, ok := tags[tagName]; ok {
            fetcher.repositories[repInfo.RemoteName][tagName] = hash
        }
        log.Debugf("repositories: %#v", fetcher.repositories)

        imgList, err := session.GetRemoteHistory(fetcher.repositories[repInfo.RemoteName][tagName], rd.Endpoints[0], rd.Tokens)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        log.Debugf("imgList: %#v", imgList)

        for _, imgID := range imgList {
            // pull layers and jsons
            buf, _, err := session.GetRemoteImageJSON(imgID, rd.Endpoints[0], rd.Tokens)
            if err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
            }
            //......... (remaining code omitted) .........