This article collects typical usage examples of the Golang function github.com/docker/distribution/registry/client/transport.NewHeaderRequestModifier. If you are wondering what exactly NewHeaderRequestModifier does, how to call it, or what it looks like in real code, the curated examples below should help.
A total of 6 code examples of NewHeaderRequestModifier are shown below, sorted by popularity by default.
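Before the examples, here is a minimal sketch of the pattern they all share, assuming only the standard net/http package and the docker/distribution transport package named above: NewHeaderRequestModifier builds a RequestModifier that adds fixed headers to every outgoing request, and transport.NewTransport wraps a base http.RoundTripper with one or more such modifiers. The User-Agent value and the registry URL below are placeholders for illustration only.

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/client/transport"
)

func main() {
	// Build a modifier that sets a fixed User-Agent header on every request.
	uaModifier := transport.NewHeaderRequestModifier(http.Header{
		"User-Agent": []string{"example-client/1.0"}, // placeholder value
	})

	// Wrap the default transport so the modifier runs before each request is sent.
	client := &http.Client{
		Transport: transport.NewTransport(http.DefaultTransport, uaModifier),
	}

	// registry.example.com is a placeholder endpoint used only for illustration.
	resp, err := client.Get("https://registry.example.com/v2/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}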
Example 1: DockerHeaders
// DockerHeaders returns request modifiers that ensure requests have
// the User-Agent header set to dockerUserAgent and that metaHeaders
// are added.
func DockerHeaders(metaHeaders http.Header) []transport.RequestModifier {
	modifiers := []transport.RequestModifier{
		transport.NewHeaderRequestModifier(http.Header{"User-Agent": []string{dockerUserAgent}}),
	}
	if metaHeaders != nil {
		modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders))
	}
	return modifiers
}
Example 2: DockerHeaders
// DockerHeaders returns request modifiers with a User-Agent and metaHeaders
func DockerHeaders(userAgent string, metaHeaders http.Header) []transport.RequestModifier {
	modifiers := []transport.RequestModifier{}
	if userAgent != "" {
		modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{
			"User-Agent": []string{userAgent},
		}))
	}
	if metaHeaders != nil {
		modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders))
	}
	return modifiers
}
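As a follow-up to Example 2, a hedged sketch of how the returned modifier slice is typically consumed: the slice is expanded into transport.NewTransport's variadic modifier arguments to build an http.Client. The helper name newRegistryHTTPClient is hypothetical, and it assumes the Example 2 form of DockerHeaders is available in the same package.

// Sketch only: newRegistryHTTPClient is a hypothetical helper name for illustration.
func newRegistryHTTPClient(userAgent string, metaHeaders http.Header) *http.Client {
	modifiers := DockerHeaders(userAgent, metaHeaders)
	// Expand the slice into NewTransport's variadic modifier arguments.
	return &http.Client{
		Transport: transport.NewTransport(http.DefaultTransport, modifiers...),
	}
}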
Example 3: New
// New constructs a new Driver with the given AWS credentials, region, encryption flag, and
// bucketName
func New(params DriverParameters) (*Driver, error) {
	auth, err := aws.GetAuth(params.AccessKey, params.SecretKey, "", time.Time{})
	if err != nil {
		return nil, fmt.Errorf("unable to resolve aws credentials, please ensure that 'accesskey' and 'secretkey' are properly set or the credentials are available in $HOME/.aws/credentials: %v", err)
	}

	if !params.Secure {
		params.Region.S3Endpoint = strings.Replace(params.Region.S3Endpoint, "https", "http", 1)
	}

	s3obj := s3.New(auth, params.Region)

	// Route all S3 client requests through a transport that injects the configured User-Agent.
	if params.UserAgent != "" {
		s3obj.Client = &http.Client{
			Transport: transport.NewTransport(http.DefaultTransport,
				transport.NewHeaderRequestModifier(http.Header{
					http.CanonicalHeaderKey("User-Agent"): []string{params.UserAgent},
				}),
			),
		}
	}

	if params.V4Auth {
		s3obj.Signature = aws.V4Signature
	} else {
		if params.Region.Name == "eu-central-1" {
			return nil, fmt.Errorf("The eu-central-1 region only works with v4 authentication")
		}
	}

	bucket := s3obj.Bucket(params.Bucket)

	// TODO Currently multipart uploads have no timestamps, so this would be unwise
	// if you initiated a new s3driver while another one is running on the same bucket.
	// multis, _, err := bucket.ListMulti("", "")
	// if err != nil {
	// 	return nil, err
	// }
	// for _, multi := range multis {
	// 	err := multi.Abort()
	// 	//TODO appropriate to do this error checking?
	// 	if err != nil {
	// 		return nil, err
	// 	}
	// }

	d := &driver{
		S3:            s3obj,
		Bucket:        bucket,
		ChunkSize:     params.ChunkSize,
		Encrypt:       params.Encrypt,
		RootDirectory: params.RootDirectory,
		StorageClass:  params.StorageClass,
	}

	return &Driver{
		baseEmbed: baseEmbed{
			Base: base.Base{
				StorageDriver: d,
			},
		},
	}, nil
}
Example 4: New
// New constructs a new Driver with the given AWS credentials, region, encryption flag, and
// bucketName
func New(params DriverParameters) (*Driver, error) {
	if !params.V4Auth &&
		(params.RegionEndpoint == "" ||
			strings.Contains(params.RegionEndpoint, "s3.amazonaws.com")) {
		return nil, fmt.Errorf("On Amazon S3 this storage driver can only be used with v4 authentication")
	}

	awsConfig := aws.NewConfig()
	var creds *credentials.Credentials
	if params.RegionEndpoint == "" {
		creds = credentials.NewChainCredentials([]credentials.Provider{
			&credentials.StaticProvider{
				Value: credentials.Value{
					AccessKeyID:     params.AccessKey,
					SecretAccessKey: params.SecretKey,
				},
			},
			&credentials.EnvProvider{},
			&credentials.SharedCredentialsProvider{},
			&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())},
		})
	} else {
		creds = credentials.NewChainCredentials([]credentials.Provider{
			&credentials.StaticProvider{
				Value: credentials.Value{
					AccessKeyID:     params.AccessKey,
					SecretAccessKey: params.SecretKey,
				},
			},
			&credentials.EnvProvider{},
		})
		awsConfig.WithS3ForcePathStyle(true)
		awsConfig.WithEndpoint(params.RegionEndpoint)
	}

	awsConfig.WithCredentials(creds)
	awsConfig.WithRegion(params.Region)
	awsConfig.WithDisableSSL(!params.Secure)

	// Inject the configured User-Agent header into every AWS SDK request.
	if params.UserAgent != "" {
		awsConfig.WithHTTPClient(&http.Client{
			Transport: transport.NewTransport(http.DefaultTransport, transport.NewHeaderRequestModifier(http.Header{http.CanonicalHeaderKey("User-Agent"): []string{params.UserAgent}})),
		})
	}

	s3obj := s3.New(session.New(awsConfig))

	// enable S3 compatible signature v2 signing instead
	if !params.V4Auth {
		setv2Handlers(s3obj)
	}

	// TODO Currently multipart uploads have no timestamps, so this would be unwise
	// if you initiated a new s3driver while another one is running on the same bucket.
	// multis, _, err := bucket.ListMulti("", "")
	// if err != nil {
	// 	return nil, err
	// }
	// for _, multi := range multis {
	// 	err := multi.Abort()
	// 	//TODO appropriate to do this error checking?
	// 	if err != nil {
	// 		return nil, err
	// 	}
	// }

	d := &driver{
		S3:                          s3obj,
		Bucket:                      params.Bucket,
		ChunkSize:                   params.ChunkSize,
		Encrypt:                     params.Encrypt,
		KeyID:                       params.KeyID,
		MultipartCopyChunkSize:      params.MultipartCopyChunkSize,
		MultipartCopyMaxConcurrency: params.MultipartCopyMaxConcurrency,
		MultipartCopyThresholdSize:  params.MultipartCopyThresholdSize,
		RootDirectory:               params.RootDirectory,
		StorageClass:                params.StorageClass,
		ObjectAcl:                   params.ObjectAcl,
	}

	return &Driver{
		baseEmbed: baseEmbed{
			Base: base.Base{
				StorageDriver: d,
			},
		},
	}, nil
}
Example 5: New
// New constructs a new Driver with the given AWS credentials, region, encryption flag, and
// bucketName
func New(params DriverParameters) (*Driver, error) {
	awsConfig := aws.NewConfig()

	if params.RegionEndpoint != "" {
		awsConfig.WithS3ForcePathStyle(true)
		awsConfig.WithEndpoint(params.RegionEndpoint)
	}

	creds := credentials.NewChainCredentials([]credentials.Provider{
		&credentials.StaticProvider{
			Value: credentials.Value{
				AccessKeyID:     params.AccessKey,
				SecretAccessKey: params.SecretKey,
			},
		},
		&credentials.EnvProvider{},
		&credentials.SharedCredentialsProvider{},
		&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())},
	})

	awsConfig.WithCredentials(creds)
	awsConfig.WithRegion(params.Region)
	awsConfig.WithDisableSSL(!params.Secure)

	if params.UserAgent != "" {
		awsConfig.WithHTTPClient(&http.Client{
			Transport: transport.NewTransport(http.DefaultTransport, transport.NewHeaderRequestModifier(http.Header{http.CanonicalHeaderKey("User-Agent"): []string{params.UserAgent}})),
		})
	}

	s3obj := s3.New(session.New(awsConfig))

	// TODO Currently multipart uploads have no timestamps, so this would be unwise
	// if you initiated a new s3driver while another one is running on the same bucket.
	// multis, _, err := bucket.ListMulti("", "")
	// if err != nil {
	// 	return nil, err
	// }
	// for _, multi := range multis {
	// 	err := multi.Abort()
	// 	//TODO appropriate to do this error checking?
	// 	if err != nil {
	// 		return nil, err
	// 	}
	// }

	d := &driver{
		S3:            s3obj,
		Bucket:        params.Bucket,
		ChunkSize:     params.ChunkSize,
		Encrypt:       params.Encrypt,
		KeyID:         params.KeyID,
		RootDirectory: params.RootDirectory,
		StorageClass:  params.StorageClass,
		ObjectAcl:     params.ObjectAcl,
	}

	return &Driver{
		baseEmbed: baseEmbed{
			Base: base.Base{
				StorageDriver: d,
			},
		},
	}, nil
}
Example 6: Spool
// Spool downloads Docker images from Distribution and builds a base layer for a Porto container
func (b *Box) Spool(ctx context.Context, name string, opts isolate.Profile) (err error) {
	defer apexctx.GetLogger(ctx).WithField("name", name).Trace("spool").Stop(&err)

	profile, err := docker.ConvertProfile(opts)
	if err != nil {
		apexctx.GetLogger(ctx).WithError(err).WithField("name", name).Info("unable to convert raw profile to Porto/Docker specific profile")
		return err
	}

	if profile.Registry == "" {
		apexctx.GetLogger(ctx).WithField("name", name).Error("Registry must be non empty")
		return fmt.Errorf("Registry must be non empty")
	}

	portoConn, err := portoConnect()
	if err != nil {
		apexctx.GetLogger(ctx).WithError(err).WithField("name", name).Error("Porto connection error")
		return err
	}

	named, err := reference.ParseNamed(filepath.Join(profile.Repository, profile.Repository, name))
	if err != nil {
		apexctx.GetLogger(ctx).WithError(err).WithField("name", name).Error("name is invalid")
		return err
	}

	// Attach the registry's Authorization header to every request if credentials are configured.
	var tr http.RoundTripper
	if registryAuth, ok := b.config.RegistryAuth[profile.Registry]; ok {
		tr = transport.NewTransport(b.transport, transport.NewHeaderRequestModifier(http.Header{
			"Authorization": []string{registryAuth},
		}))
	} else {
		tr = b.transport
	}

	var registry = profile.Registry
	if !strings.HasPrefix(registry, "http") {
		registry = "https://" + registry
	}
	apexctx.GetLogger(ctx).Debugf("Image URI generated at spawn with data: %s and %s", registry, named)

	repo, err := client.NewRepository(ctx, named, registry, tr)
	if err != nil {
		return err
	}

	tagDescriptor, err := repo.Tags(ctx).Get(ctx, engineref.GetTagFromNamedRef(named))
	if err != nil {
		return err
	}

	layerName := b.appLayerName(name)
	digest := tagDescriptor.Digest.String()
	if b.journal.In(layerName, digest) {
		apexctx.GetLogger(ctx).WithField("name", name).Infof("layer %s has been found in the cache", digest)
		return nil
	}

	manifests, err := repo.Manifests(ctx)
	if err != nil {
		return err
	}

	manifest, err := manifests.Get(ctx, tagDescriptor.Digest)
	if err != nil {
		return err
	}

	if err = portoConn.RemoveLayer(layerName); err != nil && !isEqualPortoError(err, portorpc.EError_LayerNotFound) {
		return err
	}
	apexctx.GetLogger(ctx).WithField("name", name).Infof("create a layer %s in Porto with merge", layerName)

	var order layersOrder
	switch manifest.(type) {
	case schema1.SignedManifest, *schema1.SignedManifest:
		order = layerOrderV1
	case schema2.DeserializedManifest, *schema2.DeserializedManifest:
		order = layerOrderV2
	default:
		return fmt.Errorf("unknown manifest type %T", manifest)
	}

	for _, descriptor := range order(manifest.References()) {
		blobPath, err := b.blobRepo.Get(ctx, repo, descriptor.Digest)
		if err != nil {
			return err
		}

		entry := apexctx.GetLogger(ctx).WithField("layer", layerName).Trace("ImportLayer with merge")
		err = portoConn.ImportLayer(layerName, blobPath, true)
		entry.Stop(&err)
		if err != nil {
			return err
		}
	}
	b.journal.Insert(layerName, digest)
	// NOTE: Not so fast, but it's important to debug
	journalContent.Set(b.journal.String())
	return nil
}