本文整理汇总了Golang中camlistore.org/pkg/schema.NewCommonFileMap函数的典型用法代码示例。如果您正苦于以下问题:Golang NewCommonFileMap函数的具体用法?Golang NewCommonFileMap怎么用?Golang NewCommonFileMap使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewCommonFileMap函数的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: uploadNode
// uploadNode uploads the schema blob describing n. Regular files are
// delegated to uploadNodeRegularFile; symlinks record their link target,
// directories get a static-set blob listing their children, and all other
// file types (devices, sockets, FIFOs) are rejected as unsupported.
func (up *Uploader) uploadNode(n *node) (*client.PutResult, error) {
	fi := n.fi
	mode := fi.Mode()
	// A zero ModeType bit set means a regular file; that path has its own
	// dedicated handler.
	if mode&os.ModeType == 0 {
		return up.uploadNodeRegularFile(n)
	}
	bb := schema.NewCommonFileMap(n.fullPath, fi)
	switch {
	case mode&os.ModeSymlink != 0:
		// TODO(bradfitz): use VFS here; not os.Readlink
		target, err := os.Readlink(n.fullPath)
		if err != nil {
			return nil, err
		}
		bb.SetSymlinkTarget(target)
	case mode&os.ModeDevice != 0:
		// including mode & os.ModeCharDevice
		fallthrough
	case mode&os.ModeSocket != 0:
		fallthrough
	case mode&os.ModeNamedPipe != 0: // FIFO
		fallthrough
	default:
		// Devices, sockets, and FIFOs all fall through to this error.
		// The default clause sits mid-switch on purpose: fallthrough
		// targets the next clause in source order, while the IsDir
		// case below must remain a normally-matched case.
		return nil, fmt.Errorf("camput.files: unsupported file type %v for file %v", mode, n.fullPath)
	case fi.IsDir():
		// Collect every child's already-computed blob ref into a
		// static set, upload it, and point the directory map at it.
		ss := new(schema.StaticSet)
		for _, c := range n.children {
			pr, err := c.PutResult()
			if err != nil {
				return nil, fmt.Errorf("Error populating directory static set for child %q: %v", c.fullPath, err)
			}
			ss.Add(pr.BlobRef)
		}
		sspr, err := up.UploadBlob(ss)
		if err != nil {
			return nil, err
		}
		bb.PopulateDirectoryMap(sspr.BlobRef)
	}
	// Upload the node's schema blob itself; log either way, but only
	// report uploads that actually transferred bytes (not skipped ones).
	mappr, err := up.UploadBlob(bb)
	if err == nil {
		if !mappr.Skipped {
			vlog.Printf("Uploaded %q, %s for %s", bb.Type(), mappr.BlobRef, n.fullPath)
		}
	} else {
		vlog.Printf("Error uploading map for %s (%s, %s): %v", n.fullPath, bb.Type(), bb.Blob().BlobRef(), err)
	}
	return mappr, err
}
示例2: UploadFile
// UploadFile uploads the contents of the file, as well as a file blob with
// filename for these contents. If the contents or the file blob are found on
// the server, they're not uploaded.
//
// Note: this method is still a work in progress, and might change to accommodate
// the needs of camput file.
func (cl *Client) UploadFile(filename string, contents io.Reader, opts *FileUploadOptions) (blob.Ref, error) {
	// Start from a bare file map; enrich it with stat metadata when the
	// caller supplied a FileInfo.
	fileMap := schema.NewFileMap(filename)
	if opts != nil && opts.FileInfo != nil {
		fileMap = schema.NewCommonFileMap(filename, opts.FileInfo)
		if mt := opts.FileInfo.ModTime(); !mt.IsZero() {
			fileMap.SetModTime(mt)
		}
	}
	fileMap.SetType("file")

	// Obtain the whole-file digest: either the caller provided one, or we
	// compute it here. Computing it consumes contents, so tee the consumed
	// bytes into a buffer and splice them back in front of the reader.
	var wholeRef blob.Ref
	if opts != nil && opts.WholeRef.Valid() {
		wholeRef = opts.WholeRef
	} else {
		var consumed bytes.Buffer
		ref, err := cl.wholeRef(io.TeeReader(contents, &consumed))
		if err != nil {
			return blob.Ref{}, err
		}
		wholeRef = ref
		contents = io.MultiReader(&consumed, contents)
	}

	// TODO(mpl): should we consider the case (not covered by fileMapFromDuplicate)
	// where all the parts are there, but the file schema/blob does not exist? Can that
	// even happen? I'm naively assuming it can't for now, since that's what camput file
	// does too.
	fileRef, err := cl.fileMapFromDuplicate(fileMap, wholeRef)
	if err != nil {
		return blob.Ref{}, err
	}
	if fileRef.Valid() {
		// The server already has an identical file blob; reuse it.
		return fileRef, nil
	}
	return schema.WriteFileMap(cl, fileMap, contents)
}
示例3: uploadNodeRegularFile
// uploadNodeRegularFile uploads a regular file's contents plus its "file"
// schema blob. Files above dupCheckThreshold are first checked against the
// server by whole-file digest to avoid re-uploading; vivify mode takes a
// short-circuit upload path.
// NOTE(review): this example is truncated by the source page — the function
// body continues past the last visible line.
func (up *Uploader) uploadNodeRegularFile(n *node) (*client.PutResult, error) {
	var filebb *schema.Builder
	if up.fileOpts.contentsOnly {
		// contentsOnly: build a bare file map without name/stat metadata.
		filebb = schema.NewFileMap("")
	} else {
		filebb = schema.NewCommonFileMap(n.fullPath, n.fi)
	}
	filebb.SetType("file")

	// Bound the number of concurrently open file descriptors.
	up.fdGate.Start()
	defer up.fdGate.Done()

	file, err := up.open(n.fullPath)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	if !up.fileOpts.contentsOnly {
		if up.fileOpts.exifTime {
			// Prefer the EXIF time over the filesystem mtime when asked;
			// fall back (with a warning) if EXIF extraction fails.
			ra, ok := file.(io.ReaderAt)
			if !ok {
				return nil, errors.New("Error asserting local file to io.ReaderAt")
			}
			modtime, err := schema.FileTime(ra)
			if err != nil {
				log.Printf("warning: getting time from EXIF failed for %v: %v", n.fullPath, err)
			} else {
				filebb.SetModTime(modtime)
			}
		}
		if up.fileOpts.wantCapCtime() {
			filebb.CapCreationTime()
		}
	}
	var (
		size                       = n.fi.Size()
		fileContents io.Reader     = io.LimitReader(file, size)
		br           blob.Ref          // of file schemaref
		sum          string            // sha1 hashsum of the file to upload
		pr           *client.PutResult // of the final "file" schema blob
	)
	// Only bother with the whole-file duplicate check for files large
	// enough that re-uploading them would be costly.
	const dupCheckThreshold = 256 << 10
	if size > dupCheckThreshold {
		sumRef, err := up.wholeFileDigest(n.fullPath)
		if err == nil {
			sum = sumRef.String()
			ok := false
			pr, ok = up.fileMapFromDuplicate(up.statReceiver(n), filebb, sum)
			if ok {
				br = pr.BlobRef
				android.NoteFileUploaded(n.fullPath, !pr.Skipped)
				if up.fileOpts.wantVivify() {
					// we can return early in that case, because the other options
					// are disallowed in the vivify case.
					return pr, nil
				}
			}
		}
	}
	if up.fileOpts.wantVivify() {
		// If vivify wasn't already done in fileMapFromDuplicate.
		err := schema.WriteFileChunks(up.noStatReceiver(up.statReceiver(n)), filebb, fileContents)
		if err != nil {
			return nil, err
		}
		json, err := filebb.JSON()
		if err != nil {
			return nil, err
		}
		br = blob.SHA1FromString(json)
		h := &client.UploadHandle{
			BlobRef:  br,
			Size:     uint32(len(json)),
			Contents: strings.NewReader(json),
			Vivify:   true,
		}
		pr, err = up.Upload(h)
		if err != nil {
			return nil, err
		}
		android.NoteFileUploaded(n.fullPath, true)
		return pr, nil
	}
	if !br.Valid() {
		// br still zero means fileMapFromDuplicate did not find the file on the server,
		// and the file has not just been uploaded subsequently to a vivify request.
		// So we do the full file + file schema upload here.
		if sum == "" && up.fileOpts.wantFilePermanode() {
			// Wrap the reader so the digest is computed as a side effect
			// of the upload, for the permanode naming below.
			fileContents = &trackDigestReader{r: fileContents}
		}
		br, err = schema.WriteFileMap(up.noStatReceiver(up.statReceiver(n)), filebb, fileContents)
		if err != nil {
			return nil, err
		}
	}
	//......... remainder of this example omitted by the source page .........
示例4: uploadNodeRegularFile
// uploadNodeRegularFile uploads a regular file's contents plus its "file"
// schema blob. This is an older variant of the same function (note the
// pointer *blobref.BlobRef and int64 upload size, vs. blob.Ref/uint32 in
// the newer example above). Large files are checked for server-side
// duplicates by whole-file digest first; vivify mode short-circuits.
// NOTE(review): this example is truncated by the source page — the function
// body continues past the last visible line.
func (up *Uploader) uploadNodeRegularFile(n *node) (*client.PutResult, error) {
	// TODO(mpl): maybe break this func into more maintainable pieces?
	filebb := schema.NewCommonFileMap(n.fullPath, n.fi)
	filebb.SetType("file")
	file, err := up.open(n.fullPath)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	if up.fileOpts.exifTime {
		// Prefer the EXIF time over the filesystem mtime when asked;
		// fall back (with a warning) if EXIF extraction fails.
		ra, ok := file.(io.ReaderAt)
		if !ok {
			return nil, errors.New("Error asserting local file to io.ReaderAt")
		}
		modtime, err := schema.FileTime(ra)
		if err != nil {
			log.Printf("warning: getting time from EXIF failed for %v: %v", n.fullPath, err)
		} else {
			filebb.SetModTime(modtime)
		}
	}
	var (
		size                   = n.fi.Size()
		fileContents io.Reader = io.LimitReader(file, size)
		br           *blobref.BlobRef  // of file schemaref
		sum          string            // sha1 hashsum of the file to upload
		pr           *client.PutResult // of the final "file" schema blob
	)
	// Only attempt the duplicate check for files large enough that
	// re-uploading them would be costly.
	const dupCheckThreshold = 256 << 10
	if size > dupCheckThreshold {
		sumRef, err := up.wholeFileDigest(n.fullPath)
		if err == nil {
			sum = sumRef.String()
			ok := false
			pr, ok = up.fileMapFromDuplicate(up.statReceiver(n), filebb, sum)
			if ok {
				br = pr.BlobRef
				noteFileUploaded(n.fullPath, !pr.Skipped)
				if up.fileOpts.wantVivify() {
					// we can return early in that case, because the other options
					// are disallowed in the vivify case.
					return pr, nil
				}
			}
		}
	}
	if up.fileOpts.wantVivify() {
		// If vivify wasn't already done in fileMapFromDuplicate.
		err := schema.WriteFileChunks(up.statReceiver(n), filebb, fileContents)
		if err != nil {
			return nil, err
		}
		json, err := filebb.JSON()
		if err != nil {
			return nil, err
		}
		br = blobref.SHA1FromString(json)
		h := &client.UploadHandle{
			BlobRef:  br,
			Size:     int64(len(json)),
			Contents: strings.NewReader(json),
			Vivify:   true,
		}
		pr, err = up.Upload(h)
		if err != nil {
			return nil, err
		}
		noteFileUploaded(n.fullPath, true)
		return pr, nil
	}
	if br == nil {
		// br still nil means fileMapFromDuplicate did not find the file on the server,
		// and the file has not just been uploaded subsequently to a vivify request.
		// So we do the full file + file schema upload here.
		if sum == "" && up.fileOpts.wantFilePermanode() {
			// Wrap the reader so the digest is captured during upload,
			// for the planned-permanode naming below.
			fileContents = &trackDigestReader{r: fileContents}
		}
		br, err = schema.WriteFileMap(up.statReceiver(n), filebb, fileContents)
		if err != nil {
			return nil, err
		}
	}
	// TODO(mpl): test that none of these claims get uploaded if they've already been done
	if up.fileOpts.wantFilePermanode() {
		if td, ok := fileContents.(*trackDigestReader); ok {
			sum = td.Sum()
		}
		// Use a fixed time value for signing; not using modtime
		// so two identical files don't have different modtimes?
		// TODO(bradfitz): consider this more?
		permaNodeSigTime := time.Unix(0, 0)
		permaNode, err := up.UploadPlannedPermanode(sum, permaNodeSigTime)
		if err != nil {
			return nil, fmt.Errorf("Error uploading permanode for node %v: %v", n, err)
		}
		//......... remainder of this example omitted by the source page .........
示例5: uploadNode
// uploadNode uploads the schema map for n. This is the oldest variant shown
// on this page: it mutates a raw schema map (m["camliType"]) rather than
// using the schema.Builder API, and handles regular files inline instead of
// in a separate function. Symlinks, directories, and regular files are
// supported; devices, sockets, and FIFOs return schema.ErrUnimplemented.
func (up *Uploader) uploadNode(n *node) (*client.PutResult, error) {
	fi := n.fi
	m := schema.NewCommonFileMap(n.fullPath, fi)
	mode := fi.Mode()
	switch {
	case mode&os.ModeSymlink != 0:
		// TODO(bradfitz): use VFS here; PopulateSymlinkMap uses os.Readlink directly.
		if err := schema.PopulateSymlinkMap(m, n.fullPath); err != nil {
			return nil, err
		}
	case mode&os.ModeDevice != 0:
		// including mode & os.ModeCharDevice
		fallthrough
	case mode&os.ModeSocket != 0:
		fallthrough
	case mode&os.ModeNamedPipe != 0: // FIFO
		fallthrough
	default:
		// Devices, sockets, and FIFOs all fall through to here. The
		// default clause sits mid-switch deliberately: fallthrough
		// targets the next clause in source order, and the regular-file
		// and directory cases below must stay normally matchable.
		return nil, schema.ErrUnimplemented
	case mode&os.ModeType == 0: // regular file
		m["camliType"] = "file"
		file, err := up.open(n.fullPath)
		if err != nil {
			return nil, err
		}
		defer file.Close()
		statReceiver := up.altStatReceiver
		if statReceiver == nil {
			// TODO(bradfitz): just make Client be a
			// StatReceiver? move remote's ReceiveBlob ->
			// Upload wrapper into Client itself?
			statReceiver = remote.NewFromClient(up.Client)
		}
		// Choose the chunking strategy: rolling-checksum splits when
		// rollSplits is set, the plain writer otherwise.
		schemaWriteFileMap := schema.WriteFileMap
		if up.rollSplits {
			schemaWriteFileMap = schema.WriteFileMapRolling
		}
		blobref, err := schemaWriteFileMap(statReceiver, m, io.LimitReader(file, fi.Size()))
		if err != nil {
			return nil, err
		}
		// TODO(bradfitz): taking a PutResult here is kinda
		// gross. should instead make a blobserver.Storage
		// wrapper type that can track some of this? or that
		// updates the client stats directly or something.
		{
			// Synthesize a PutResult for the file schema blob and
			// return early — the UploadMap below is skipped for
			// regular files. Marshal error deliberately ignored here.
			json, _ := schema.MapToCamliJSON(m)
			pr := &client.PutResult{BlobRef: blobref, Size: int64(len(json)), Skipped: false}
			return pr, nil
		}
	case fi.IsDir():
		// Collect every child's blob ref into a static set, upload it,
		// then point the directory map at it.
		ss := new(schema.StaticSet)
		for _, c := range n.children {
			pr, err := c.PutResult()
			if err != nil {
				return nil, fmt.Errorf("Error populating directory static set for child %q: %v", c.fullPath, err)
			}
			ss.Add(pr.BlobRef)
		}
		sspr, err := up.UploadMap(ss.Map())
		if err != nil {
			return nil, err
		}
		schema.PopulateDirectoryMap(m, sspr.BlobRef)
	}
	// Reached only for symlinks and directories (the regular-file case
	// returned above): upload the node's schema map itself.
	mappr, err := up.UploadMap(m)
	if err == nil {
		if !mappr.Skipped {
			vlog.Printf("Uploaded %q, %s for %s", m["camliType"], mappr.BlobRef, n.fullPath)
		}
	} else {
		vlog.Printf("Error uploading map %v: %v", m, err)
	}
	return mappr, err
}
示例6: uploadNodeRegularFile
// uploadNodeRegularFile uploads a regular file's contents plus its "file"
// schema blob, using the raw schema-map API (m["camliType"]). Vivify mode
// short-circuits with a vivify upload; large files are checked for
// server-side duplicates by whole-file digest; with wantFilePermanode a
// planned permanode and camliContent/tag claims are signed and uploaded.
// NOTE(review): this example is truncated by the source page — the function
// body continues past the last visible line.
func (up *Uploader) uploadNodeRegularFile(n *node) (*client.PutResult, error) {
	m := schema.NewCommonFileMap(n.fullPath, n.fi)
	m["camliType"] = "file"
	file, err := up.open(n.fullPath)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	size := n.fi.Size()
	var fileContents io.Reader = io.LimitReader(file, size)
	if up.fileOpts.wantVivify() {
		// Vivify: write the chunks, then upload the file schema blob
		// with the Vivify flag and return immediately.
		err := schema.WriteFileChunks(up.statReceiver(), m, fileContents)
		if err != nil {
			return nil, err
		}
		json, err := m.JSON()
		if err != nil {
			return nil, err
		}
		bref := blobref.SHA1FromString(json)
		h := &client.UploadHandle{
			BlobRef:  bref,
			Size:     int64(len(json)),
			Contents: strings.NewReader(json),
			Vivify:   true,
		}
		return up.Upload(h)
	}
	var (
		blobref *blobref.BlobRef // of file schemaref
		sum     string           // "sha1-xxxxx"
	)
	// Only attempt the server-side duplicate check for files large enough
	// that re-uploading them would be costly.
	const dupCheckThreshold = 256 << 10
	if size > dupCheckThreshold {
		sumRef, err := up.wholeFileDigest(n.fullPath)
		if err == nil {
			sum = sumRef.String()
			if ref, ok := up.fileMapFromDuplicate(up.statReceiver(), m, sum); ok {
				blobref = ref
			}
		}
	}
	if blobref == nil {
		// No duplicate found on the server; do the full upload. Wrap the
		// reader to capture the digest when a permanode will be needed.
		if sum == "" && up.fileOpts.wantFilePermanode() {
			fileContents = &trackDigestReader{r: fileContents}
		}
		blobref, err = schema.WriteFileMap(up.statReceiver(), m, fileContents)
		if err != nil {
			return nil, err
		}
	}
	// TODO(mpl): test that none of these claims get uploaded if they've already been done
	if up.fileOpts.wantFilePermanode() {
		if td, ok := fileContents.(*trackDigestReader); ok {
			sum = td.Sum()
		}
		// Use a fixed time value for signing; not using modtime
		// so two identical files don't have different modtimes?
		// TODO(bradfitz): consider this more?
		permaNodeSigTime := time.Unix(0, 0)
		permaNode, err := up.UploadPlannedPermanode(sum, permaNodeSigTime)
		if err != nil {
			return nil, fmt.Errorf("Error uploading permanode for node %v: %v", n, err)
		}
		handleResult("node-permanode", permaNode, nil)
		// claimTime is both the time of the "claimDate" in the
		// JSON claim, as well as the date in the OpenPGP
		// header.
		// TODO(bradfitz): this is a little clumsy to do by hand.
		// There should probably be a method on *Uploader to do this
		// from an unsigned schema map. Maybe ditch the schema.Claimer
		// type and just have the Uploader override the claimDate.
		claimTime := n.fi.ModTime()
		contentAttr := schema.NewSetAttributeClaim(permaNode.BlobRef, "camliContent", blobref.String())
		contentAttr.SetClaimDate(claimTime)
		signed, err := up.SignMap(contentAttr, claimTime)
		if err != nil {
			return nil, fmt.Errorf("Failed to sign content claim for node %v: %v", n, err)
		}
		put, err := up.uploadString(signed)
		if err != nil {
			return nil, fmt.Errorf("Error uploading permanode's attribute for node %v: %v", n, err)
		}
		handleResult("node-permanode-contentattr", put, nil)
		if tags := up.fileOpts.tags(); len(tags) > 0 {
			// TODO(mpl): do these claims concurrently, not in series
			for _, tag := range tags {
				m := schema.NewAddAttributeClaim(permaNode.BlobRef, "tag", tag)
				m.SetClaimDate(claimTime)
				// TODO(mpl): verify that SetClaimDate does modify the GPG signature date of the claim
				signed, err := up.SignMap(m, claimTime)
				//......... remainder of this example omitted by the source page .........