This article collects typical usage examples of the NewDigestFromHex function from the Go package github.com/docker/distribution/digest. If you are wondering what exactly NewDigestFromHex does and how to use it, the curated code examples below may help.
Eight code examples of NewDigestFromHex are shown below, sorted by popularity by default.
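Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what NewDigestFromHex does: it performs no hashing, it simply joins an algorithm name and a hex-encoded hash into a digest of the form <alg>:<hex>; Validate is what checks that the result is well formed. The hex value is the SHA-256 of the empty string, used purely as a sample.

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	// SHA-256 of the empty string, used only as a well-formed sample value.
	hexStr := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

	// NewDigestFromHex does no hashing; it produces the string "sha256:<hex>".
	dgst := digest.NewDigestFromHex("sha256", hexStr)
	fmt.Println(dgst)             // sha256:e3b0...b855
	fmt.Println(dgst.Algorithm()) // sha256
	fmt.Println(dgst.Hex())       // e3b0...b855

	// Validate only checks that the digest is well formed for its algorithm.
	if err := dgst.Validate(); err != nil {
		fmt.Println("invalid digest:", err)
	}
}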
Example 1: revisions
// revisions returns all revisions with the specified name and tag.
func (ts *tagStore) revisions(tag string) ([]digest.Digest, error) {
	manifestTagIndexPath, err := ts.pm.path(manifestTagIndexPathSpec{
		name: ts.Name(),
		tag:  tag,
	})
	if err != nil {
		return nil, err
	}

	// TODO(stevvooe): Need to append digest alg to get listing of revisions.
	manifestTagIndexPath = path.Join(manifestTagIndexPath, "sha256")

	entries, err := ts.driver.List(ts.repository.ctx, manifestTagIndexPath)
	if err != nil {
		return nil, err
	}

	var revisions []digest.Digest
	for _, entry := range entries {
		revisions = append(revisions, digest.NewDigestFromHex("sha256", path.Base(entry)))
	}

	return revisions, nil
}
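The loop above turns each entry returned by the storage driver into a revision digest by taking the last path element as the hex value. A minimal sketch of that single step, using a hypothetical index path:

package main

import (
	"fmt"
	"path"

	"github.com/docker/distribution/digest"
)

func main() {
	// Hypothetical entry of the kind the storage driver's List call returns.
	entry := "/docker/registry/v2/repositories/foo/_manifests/tags/latest/index/sha256/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

	// path.Base yields the trailing hex component; NewDigestFromHex turns it
	// back into the revision digest "sha256:<hex>".
	revision := digest.NewDigestFromHex("sha256", path.Base(entry))
	fmt.Println(revision)
}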
Example 2: convertTarget
func convertTarget(t client.Target) (target, error) {
	h, ok := t.Hashes["sha256"]
	if !ok {
		return target{}, errors.New("no valid hash, expecting sha256")
	}
	return target{
		reference: registry.ParseReference(t.Name),
		digest:    digest.NewDigestFromHex("sha256", hex.EncodeToString(h)),
		size:      t.Length,
	}, nil
}
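The notary target carries the sha256 value as raw bytes, so hex.EncodeToString is needed before NewDigestFromHex can use it. A small sketch of that step, hashing an arbitrary payload in place of a real notary target:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	// Stand-in for t.Hashes["sha256"], which holds the raw (binary) hash bytes.
	sum := sha256.Sum256([]byte("example manifest payload"))

	// hex.EncodeToString converts the raw bytes into the hex string form that
	// NewDigestFromHex expects.
	dgst := digest.NewDigestFromHex("sha256", hex.EncodeToString(sum[:]))
	fmt.Println(dgst)
}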
Example 3: digestFromPath
// Reconstructs a digest from a path
func digestFromPath(digestPath string) (digest.Digest, error) {
	digestPath = strings.TrimSuffix(digestPath, "/data")
	dir, hex := path.Split(digestPath)
	dir = path.Dir(dir)
	dir, next := path.Split(dir)

	// next is either the algorithm OR the first two characters in the hex string
	var algo string
	if next == hex[:2] {
		algo = path.Base(dir)
	} else {
		algo = next
	}

	dgst := digest.NewDigestFromHex(algo, hex)
	return dgst, dgst.Validate()
}
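A short sketch of how digestFromPath resolves both layouts it handles. It assumes digestFromPath from the example above is in scope (and fmt imported); the blob paths are hypothetical:

// Both layouts resolve to the same digest.
func exampleDigestFromPath() {
	hexStr := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

	// Prefixed layout: <alg>/<first two hex chars>/<hex>/data
	d1, _ := digestFromPath("/v2/blobs/sha256/" + hexStr[:2] + "/" + hexStr + "/data")

	// Flat layout: <alg>/<hex>/data
	d2, _ := digestFromPath("/v2/blobs/sha256/" + hexStr + "/data")

	fmt.Println(d1 == d2, d1) // true sha256:e3b0...b855
}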
Example 4: Walk
// Walk calls the supplied callback for each image ID in the storage backend.
func (s *fs) Walk(f IDWalkFunc) error {
	// Only Canonical digest (sha256) is currently supported
	s.RLock()
	dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical)))
	s.RUnlock()
	if err != nil {
		return err
	}
	for _, v := range dir {
		dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name())
		if err := dgst.Validate(); err != nil {
			logrus.Debugf("Skipping invalid digest %s: %s", dgst, err)
			continue
		}
		if err := f(ID(dgst)); err != nil {
			return err
		}
	}
	return nil
}
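Since NewDigestFromHex never returns an error, the Validate call above is what actually filters out directory entries whose names are not well-formed sha256 hex strings. A minimal sketch of that skip-invalid pattern with hypothetical directory names:

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	// Hypothetical directory entries: one valid sha256 hex name, one stray file.
	names := []string{
		"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
		"tmp-12345",
	}
	for _, name := range names {
		dgst := digest.NewDigestFromHex(string(digest.Canonical), name)
		// Validate rejects names that are not well-formed hex for sha256.
		if err := dgst.Validate(); err != nil {
			fmt.Printf("skipping %s: %v\n", name, err)
			continue
		}
		fmt.Println("image ID:", dgst)
	}
}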
Example 5: List
func (fms *fileMetadataStore) List() ([]ChainID, []string, error) {
	var ids []ChainID
	for _, algorithm := range supportedAlgorithms {
		fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm)))
		if err != nil {
			if os.IsNotExist(err) {
				continue
			}
			return nil, nil, err
		}

		for _, fi := range fileInfos {
			if fi.IsDir() && fi.Name() != "mounts" {
				dgst := digest.NewDigestFromHex(string(algorithm), fi.Name())
				if err := dgst.Validate(); err != nil {
					logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name())
				} else {
					ids = append(ids, ChainID(dgst))
				}
			}
		}
	}

	fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts"))
	if err != nil {
		if os.IsNotExist(err) {
			return ids, []string{}, nil
		}
		return nil, nil, err
	}

	var mounts []string
	for _, fi := range fileInfos {
		if fi.IsDir() {
			mounts = append(mounts, fi.Name())
		}
	}

	return ids, mounts, nil
}
Example 6: Walk
func (cs *ContentStore) Walk(fn func(path string, dgst digest.Digest) error) error {
	root := filepath.Join(cs.root, "blobs")
	var alg digest.Algorithm
	return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
		if !fi.IsDir() && !alg.Available() {
			return nil
		}

		// TODO(stevvooe): There are a few more cases with subdirs that should be
		// handled in case the layout gets corrupted. This isn't strict enough
		// and may spew bad data.

		if path == root {
			return nil
		} else if filepath.Dir(path) == root {
			alg = digest.Algorithm(filepath.Base(path))
			if !alg.Available() {
				alg = ""
				return filepath.SkipDir
			}

			// descending into a hash directory
			return nil
		}

		dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(path))
		if err := dgst.Validate(); err != nil {
			// log the error but don't report it to the caller
			log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path")

			// if we see this, it could mean some sort of corruption of the
			// store or extra paths not expected previously.
		}

		return fn(path, dgst)
	})
}
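In this walk the algorithm comes from the directory one level above the blob, so NewDigestFromHex is fed filepath.Base of both levels. A small sketch of that layout assumption with a hypothetical content-store path:

package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/distribution/digest"
)

func main() {
	// Hypothetical blob path under <root>/blobs/<alg>/<hex>.
	p := "/var/lib/content/blobs/sha256/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

	// The parent directory name is the algorithm, the base name is the hex.
	alg := digest.Algorithm(filepath.Base(filepath.Dir(p)))
	dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(p))

	fmt.Println(alg.Available())       // true for sha256
	fmt.Println(dgst, dgst.Validate()) // sha256:e3b0...b855 <nil>
}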
Example 7: trustedResolveDigest
func trustedResolveDigest(ctx context.Context, cli *command.DockerCli, ref reference.NamedTagged) (distreference.Canonical, error) {
	repoInfo, err := registry.ParseRepositoryInfo(ref)
	if err != nil {
		return nil, err
	}

	authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index)

	notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull")
	if err != nil {
		return nil, errors.Wrap(err, "error establishing connection to trust repository")
	}

	t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole)
	if err != nil {
		return nil, trust.NotaryError(repoInfo.FullName(), err)
	}
	// Only get the tag if it's in the top level targets role or the releases delegation role
	// ignore it if it's in any other delegation roles
	if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole {
		return nil, trust.NotaryError(repoInfo.FullName(), fmt.Errorf("No trust data for %s", ref.String()))
	}

	logrus.Debugf("retrieving target for %s role\n", t.Role)
	h, ok := t.Hashes["sha256"]
	if !ok {
		return nil, errors.New("no valid hash, expecting sha256")
	}

	dgst := digest.NewDigestFromHex("sha256", hex.EncodeToString(h))

	// Using distribution reference package to make sure that adding a
	// digest does not erase the tag. When the two reference packages
	// are unified, this will no longer be an issue.
	return distreference.WithDigest(ref, dgst)
}
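The final step above relies on distreference.WithDigest to attach the resolved digest to the tagged reference; the example uses the distribution reference package precisely because it keeps the tag when a digest is added. A hedged sketch of that call in isolation, using a hypothetical reference and the sample digest from earlier instead of a real notary lookup:

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
	distreference "github.com/docker/distribution/reference"
)

func main() {
	// Hypothetical tagged reference; in the example above it comes from the CLI.
	named, err := distreference.ParseNamed("docker.io/library/busybox:latest")
	if err != nil {
		panic(err)
	}

	// Sample digest standing in for the one resolved from the notary target.
	dgst := digest.NewDigestFromHex("sha256", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")

	// WithDigest attaches the digest to the reference.
	canonical, err := distreference.WithDigest(named, dgst)
	if err != nil {
		panic(err)
	}
	fmt.Println(canonical.String())
}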
Example 8: makeBlobStoreWalkFunc
// makeBlobStoreWalkFunc returns a function for walking a blob store at a
// particular rootPath. The returned function calls the given ingest callback
// on each digest found. The blob store is expected to have the following
// layout:
//
// if multilevel is true:
//   <rootPath>/<alg>/<prefix>/<digest>
//   <rootPath>/tarsum/<version>/<alg>/<prefix>/<digest>
// otherwise:
//   <rootPath>/<alg>/<digest>
//   <rootPath>/tarsum/<version>/<alg>/<digest>
func makeBlobStoreWalkFunc(rootPath string, multilevel bool, ingest func(digest.Digest) error) (WalkFn, error) {
	var (
		// number of slashes in a path to a full digest directory under rootPath
		blobRefPathSepCount       int
		blobTarsumRefPathSepCount int
	)
	if multilevel {
		// <alg>/<prefix>/<digest>
		blobRefPathSepCount = 2
		// tarsum/<version>/<alg>/<prefix>/<digest>
		blobTarsumRefPathSepCount = 4
	} else {
		// <alg>/<digest>
		blobRefPathSepCount = 1
		// tarsum/<version>/<alg>/<digest>
		blobTarsumRefPathSepCount = 3
	}

	return func(fi storageDriver.FileInfo) error {
		if !fi.IsDir() {
			// ignore files
			return nil
		}

		// trim the "<rootPath>/" prefix
		pth := strings.TrimPrefix(strings.TrimPrefix(fi.Path(), rootPath), "/")

		sepCount := strings.Count(pth, "/")
		if sepCount < blobRefPathSepCount {
			// the path is too short to contain a digest
			return nil
		}

		alg := ""
		tarsumParts := reTarsumPrefix.FindStringSubmatch(pth)
		isTarsum := len(tarsumParts) > 0

		if sepCount > blobTarsumRefPathSepCount || (!isTarsum && sepCount > blobRefPathSepCount) {
			// too many path components
			return ErrSkipDir
		}

		if len(tarsumParts) > 0 {
			alg = "tarsum." + tarsumParts[1] + "+"
			// trim the "tarsum/<version>/" prefix from the path
			pth = strings.TrimPrefix(pth[len(tarsumParts[0]):], "/")
		}

		digestParts := reDigestPath.FindStringSubmatch(pth)
		if len(digestParts) > 0 {
			alg += digestParts[1]
			dgstHex := digestParts[2]
			dgst := digest.NewDigestFromHex(alg, dgstHex)
			// append only valid digests
			if err := dgst.Validate(); err == nil {
				if err := ingest(dgst); err != nil {
					return ErrFinishedWalk
				}
			}
			return ErrSkipDir
		}

		return nil
	}, nil
}
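The walk function above decides how deep it is in the tree by counting path separators against the expected layout depth. A small sketch of that heuristic on hypothetical relative paths (rootPath already trimmed), matching the layouts in the doc comment:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical relative blob paths for the multilevel layout.
	paths := []string{
		"sha256/e3/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",           // <alg>/<prefix>/<digest>: 2 separators
		"tarsum/v1/sha256/e3/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", // tarsum variant: 4 separators
		"sha256",                                                                               // too shallow to hold a digest
	}
	for _, p := range paths {
		// The walk function compares this count against the expected depth to
		// decide whether a directory can hold a digest or should be skipped.
		fmt.Println(strings.Count(p, "/"), p)
	}
}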