本文整理汇总了Golang中camlistore.org/pkg/schema.NewFileMap函数的典型用法代码示例。如果您正苦于以下问题:Golang NewFileMap函数的具体用法?Golang NewFileMap怎么用?Golang NewFileMap使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewFileMap函数的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: UploadFile
// UploadFile stores contents as a raw data blob, wraps it in a "file"
// schema blob named fileName, and receives both into the index. It
// returns the refs of the file schema blob and of the contents blob.
// Any indexing or serialization error panics (test-helper behavior).
func (id *IndexDeps) UploadFile(fileName string, contents string) (fileRef, wholeRef *blobref.BlobRef) {
	cb := &test.Blob{Contents: contents}
	id.BlobSource.AddBlob(cb)
	wholeRef = cb.BlobRef()
	if _, err := id.Index.ReceiveBlob(wholeRef, cb.Reader()); err != nil {
		panic(err)
	}
	m := schema.NewFileMap(fileName)
	// The whole file is represented as a single part backed by the
	// contents blob.
	schema.PopulateParts(m, int64(len(contents)), []schema.BytesPart{
		{
			Size:    uint64(len(contents)),
			BlobRef: wholeRef,
		}})
	fjson, err := schema.MapToCamliJSON(m)
	if err != nil {
		panic(err)
	}
	fb := &test.Blob{Contents: fjson}
	log.Printf("Blob is: %s", fjson)
	id.BlobSource.AddBlob(fb)
	fileRef = fb.BlobRef()
	if _, err := id.Index.ReceiveBlob(fileRef, fb.Reader()); err != nil {
		panic(err)
	}
	return
}
示例2: UploadFile
// UploadFile stores contents as a raw data blob plus a "file" schema
// blob named fileName, receiving both into the index. It returns the
// refs of the file schema blob and of the contents blob. Any error
// aborts the test via id.Fatalf.
func (id *IndexDeps) UploadFile(fileName string, contents string) (fileRef, wholeRef *blobref.BlobRef) {
	cb := &test.Blob{Contents: contents}
	id.BlobSource.AddBlob(cb)
	wholeRef = cb.BlobRef()
	if _, err := id.Index.ReceiveBlob(wholeRef, cb.Reader()); err != nil {
		id.Fatalf("UploadFile.ReceiveBlob: %v", err)
	}
	m := schema.NewFileMap(fileName)
	// The whole file is a single part backed by the contents blob.
	m.PopulateParts(int64(len(contents)), []schema.BytesPart{
		{
			Size:    uint64(len(contents)),
			BlobRef: wholeRef,
		}})
	fjson, err := m.JSON()
	if err != nil {
		id.Fatalf("UploadFile.JSON: %v", err)
	}
	fb := &test.Blob{Contents: fjson}
	id.BlobSource.AddBlob(fb)
	fileRef = fb.BlobRef()
	if _, err := id.Index.ReceiveBlob(fileRef, fb.Reader()); err != nil {
		// Report through Fatalf like the other error paths here,
		// instead of panicking.
		id.Fatalf("UploadFile.ReceiveBlob(file schema): %v", err)
	}
	return
}
示例3: UploadFile
// UploadFile uploads contents as a data blob and a "file" schema blob
// named fileName, returning the refs of the file schema blob and of the
// contents blob. If modTime is zero, it's not used. Any error aborts
// the test via id.Fatalf.
func (id *IndexDeps) UploadFile(fileName string, contents string, modTime time.Time) (fileRef, wholeRef blob.Ref) {
	wholeRef = id.UploadString(contents)
	m := schema.NewFileMap(fileName)
	// The whole file is a single part backed by the contents blob.
	m.PopulateParts(int64(len(contents)), []schema.BytesPart{
		{
			Size:    uint64(len(contents)),
			BlobRef: wholeRef,
		}})
	if !modTime.IsZero() {
		m.SetModTime(modTime)
	}
	fjson, err := m.JSON()
	if err != nil {
		id.Fatalf("UploadFile.JSON: %v", err)
	}
	fb := &test.Blob{Contents: fjson}
	id.BlobSource.AddBlob(fb)
	fileRef = fb.BlobRef()
	if _, err := id.Index.ReceiveBlob(fileRef, fb.Reader()); err != nil {
		// Report through Fatalf like the other error paths here,
		// instead of panicking.
		id.Fatalf("UploadFile.ReceiveBlob(file schema): %v", err)
	}
	return
}
示例4: UploadFile
// UploadFile uploads the contents of the file, as well as a file blob with
// filename for these contents. If the contents or the file blob are found on
// the server, they're not uploaded. It returns the ref of the file schema
// blob.
//
// Note: this method is still a work in progress, and might change to accommodate
// the needs of camput file.
func (cl *Client) UploadFile(filename string, contents io.Reader, opts *FileUploadOptions) (blob.Ref, error) {
	// Build the file schema map exactly once, seeding it with stat info
	// (and its mod time, when set) if the caller provided any.
	var fileMap *schema.Builder
	if opts != nil && opts.FileInfo != nil {
		fileMap = schema.NewCommonFileMap(filename, opts.FileInfo)
		if modTime := opts.FileInfo.ModTime(); !modTime.IsZero() {
			fileMap.SetModTime(modTime)
		}
	} else {
		fileMap = schema.NewFileMap(filename)
	}
	fileMap.SetType("file")

	// Use the caller-supplied whole-file ref if valid; otherwise compute it
	// by reading contents through a tee, then replay the buffered bytes so
	// the subsequent upload still sees the full stream.
	var wholeRef blob.Ref
	if opts != nil && opts.WholeRef.Valid() {
		wholeRef = opts.WholeRef
	} else {
		var buf bytes.Buffer
		var err error
		wholeRef, err = cl.wholeRef(io.TeeReader(contents, &buf))
		if err != nil {
			return blob.Ref{}, err
		}
		contents = io.MultiReader(&buf, contents)
	}

	// TODO(mpl): should we consider the case (not covered by fileMapFromDuplicate)
	// where all the parts are there, but the file schema/blob does not exist? Can that
	// even happen? I'm naively assuming it can't for now, since that's what camput file
	// does too.
	fileRef, err := cl.fileMapFromDuplicate(fileMap, wholeRef)
	if err != nil {
		return blob.Ref{}, err
	}
	if fileRef.Valid() {
		// Already on the server; nothing to upload.
		return fileRef, nil
	}
	return schema.WriteFileMap(cl, fileMap, contents)
}
示例5: uploadNodeRegularFile
// uploadNodeRegularFile uploads a regular file's chunks and its "file"
// schema blob, with optional EXIF-derived mod time, duplicate detection
// for large files, and vivify support.
// NOTE(review): snippet is truncated by the source article; the tail of
// the function is not shown here.
func (up *Uploader) uploadNodeRegularFile(n *node) (*client.PutResult, error) {
// contentsOnly skips filename/stat metadata in the schema blob.
var filebb *schema.Builder
if up.fileOpts.contentsOnly {
filebb = schema.NewFileMap("")
} else {
filebb = schema.NewCommonFileMap(n.fullPath, n.fi)
}
filebb.SetType("file")
// Bound the number of concurrently open file descriptors.
up.fdGate.Start()
defer up.fdGate.Done()
file, err := up.open(n.fullPath)
if err != nil {
return nil, err
}
defer file.Close()
if !up.fileOpts.contentsOnly {
if up.fileOpts.exifTime {
ra, ok := file.(io.ReaderAt)
if !ok {
return nil, errors.New("Error asserting local file to io.ReaderAt")
}
// Prefer the EXIF timestamp; fall back to the stat mtime on failure.
modtime, err := schema.FileTime(ra)
if err != nil {
log.Printf("warning: getting time from EXIF failed for %v: %v", n.fullPath, err)
} else {
filebb.SetModTime(modtime)
}
}
if up.fileOpts.wantCapCtime() {
filebb.CapCreationTime()
}
}
var (
size = n.fi.Size()
fileContents io.Reader = io.LimitReader(file, size)
br blob.Ref // of file schemaref
sum string // sha1 hashsum of the file to upload
pr *client.PutResult // of the final "file" schema blob
)
// Only bother with a whole-file digest lookup for files above 256 KiB.
const dupCheckThreshold = 256 << 10
if size > dupCheckThreshold {
sumRef, err := up.wholeFileDigest(n.fullPath)
if err == nil {
sum = sumRef.String()
ok := false
pr, ok = up.fileMapFromDuplicate(up.statReceiver(n), filebb, sum)
if ok {
br = pr.BlobRef
android.NoteFileUploaded(n.fullPath, !pr.Skipped)
if up.fileOpts.wantVivify() {
// we can return early in that case, because the other options
// are disallowed in the vivify case.
return pr, nil
}
}
}
}
if up.fileOpts.wantVivify() {
// If vivify wasn't already done in fileMapFromDuplicate.
err := schema.WriteFileChunks(up.noStatReceiver(up.statReceiver(n)), filebb, fileContents)
if err != nil {
return nil, err
}
json, err := filebb.JSON()
if err != nil {
return nil, err
}
br = blob.SHA1FromString(json)
// Upload the schema blob itself with the Vivify flag set.
h := &client.UploadHandle{
BlobRef: br,
Size: uint32(len(json)),
Contents: strings.NewReader(json),
Vivify: true,
}
pr, err = up.Upload(h)
if err != nil {
return nil, err
}
android.NoteFileUploaded(n.fullPath, true)
return pr, nil
}
if !br.Valid() {
// br still zero means fileMapFromDuplicate did not find the file on the server,
// and the file has not just been uploaded subsequently to a vivify request.
// So we do the full file + file schema upload here.
if sum == "" && up.fileOpts.wantFilePermanode() {
fileContents = &trackDigestReader{r: fileContents}
}
br, err = schema.WriteFileMap(up.noStatReceiver(up.statReceiver(n)), filebb, fileContents)
if err != nil {
return nil, err
}
}
//.........这里部分代码省略.........
示例6: populate
// Populates the bs, and the index at the same time through the sync handler.
// For each file in testdata/, it writes the file's chunks and schema blob,
// then signs and receives a planned permanode plus a camliContent claim.
// NOTE(review): snippet is truncated by the source article; the closing
// brace of the function is not shown here.
func populate(b *testing.B, dbfile string,
sortedProvider func(dbfile string) (sorted.KeyValue, error)) *index.Index {
b.Logf("populating %v", dbfile)
kv, err := sortedProvider(dbfile)
if err != nil {
b.Fatal(err)
}
// Blob storage lives next to the index db file.
bsRoot := filepath.Join(filepath.Dir(dbfile), "bs")
if err := os.MkdirAll(bsRoot, 0700); err != nil {
b.Fatal(err)
}
dataDir, err := os.Open("testdata")
if err != nil {
b.Fatal(err)
}
fis, err := dataDir.Readdir(-1)
if err != nil {
b.Fatal(err)
}
if len(fis) == 0 {
b.Fatalf("no files in %s dir", "testdata")
}
ks := doKeyStuff(b)
bs, err := localdisk.New(bsRoot)
if err != nil {
b.Fatal(err)
}
// The public key must be present so claim signatures can be verified.
if _, err := blobserver.Receive(bs, ks.pubKeyRef, strings.NewReader(ks.pubKey)); err != nil {
b.Fatal(err)
}
idx, err := index.New(kv)
if err != nil {
b.Fatal(err)
}
idx.InitBlobSource(bs)
sh := server.NewSyncHandler("/bs/", "/index/", bs, idx, sorted.NewMemoryKeyValue())
// Everything above is setup; time only the population loop below.
b.ResetTimer()
for _, v := range fis {
f, err := os.Open(filepath.Join(dataDir.Name(), v.Name()))
if err != nil {
b.Fatal(err)
}
// trackDigestReader records the content digest as the file is read,
// so the permanode below can be planned from it.
td := &trackDigestReader{r: f}
fm := schema.NewFileMap(v.Name())
fm.SetModTime(v.ModTime())
fileRef, err := schema.WriteFileMap(bs, fm, td)
if err != nil {
b.Fatal(err)
}
f.Close()
unsigned := schema.NewPlannedPermanode(td.Sum())
unsigned.SetSigner(ks.pubKeyRef)
sr := &jsonsign.SignRequest{
UnsignedJSON: unsigned.Blob().JSON(),
// TODO(mpl): if we make a bs that discards, replace this with a memory bs that has only the pubkey
Fetcher: bs,
EntityFetcher: ks.entityFetcher,
SignatureTime: time.Unix(0, 0),
}
signed, err := sr.Sign()
if err != nil {
b.Fatal("problem signing: " + err.Error())
}
pn := blob.SHA1FromString(signed)
// N.B: use blobserver.Receive so that the blob hub gets notified, and the blob gets enqueued into the index
if _, err := blobserver.Receive(bs, pn, strings.NewReader(signed)); err != nil {
b.Fatal(err)
}
// Attach the file to the permanode via a camliContent claim, dated
// with the file's mod time.
contentAttr := schema.NewSetAttributeClaim(pn, "camliContent", fileRef.String())
claimTime, ok := fm.ModTime()
if !ok {
b.Fatal(err)
}
contentAttr.SetClaimDate(claimTime)
contentAttr.SetSigner(ks.pubKeyRef)
sr = &jsonsign.SignRequest{
UnsignedJSON: contentAttr.Blob().JSON(),
// TODO(mpl): if we make a bs that discards, replace this with a memory bs that has only the pubkey
Fetcher: bs,
EntityFetcher: ks.entityFetcher,
SignatureTime: claimTime,
}
signed, err = sr.Sign()
if err != nil {
b.Fatal("problem signing: " + err.Error())
}
cl := blob.SHA1FromString(signed)
if _, err := blobserver.Receive(bs, cl, strings.NewReader(signed)); err != nil {
b.Fatal(err)
}
}
// Wait for the sync handler to finish mirroring blobs into the index.
sh.IdleWait()
return idx
//.........这里部分代码省略.........
示例7: TestPackerBoundarySplits
// TestPackerBoundarySplits binary-searches the size of a third chunk so that
// a packed file crosses the 16 MB zip boundary, exercising the packer's
// truncate-and-rewrite misprediction path.
// NOTE(review): snippet is truncated by the source article; the remainder of
// the test body is not shown here.
func TestPackerBoundarySplits(t *testing.T) {
if testing.Short() {
t.Skip("skipping slow test")
}
// Test a file of three chunk sizes, totalling near the 16 MB
// boundary:
// - 1st chunk is 6 MB. ("blobA")
// - 2nd chunk is 6 MB. ("blobB")
// - 3rd chunk ("blobC") is binary-searched (up to 4MB) to find
// which size causes the packer to write two zip files.
// During the test we set zip overhead boundaries to 0, to
// force the test to into its pathological misprediction code paths,
// where it needs to back up and rewrite the zip with one part less.
// That's why the test starts with two zip files: so there's at
// least one that can be removed to make room.
defer setIntTemporarily(&zipPerEntryOverhead, 0)()
const sizeAB = 12 << 20
const maxBlobSize = 16 << 20
bytesAB := randBytes(sizeAB)
blobA := &test.Blob{string(bytesAB[:sizeAB/2])}
blobB := &test.Blob{string(bytesAB[sizeAB/2:])}
refA := blobA.BlobRef()
refB := blobB.BlobRef()
bytesCFull := randBytes(maxBlobSize - sizeAB) // will be sliced down
// Mechanism to verify we hit the back-up code path:
var (
mu sync.Mutex
sawTruncate blob.Ref
stoppedBeforeOverflow bool
)
// Test hooks record that the packer truncated after blobB and that it
// stopped before overflowing the zip.
testHookSawTruncate = func(after blob.Ref) {
if after != refB {
t.Errorf("unexpected truncate point %v", after)
}
mu.Lock()
defer mu.Unlock()
sawTruncate = after
}
testHookStopBeforeOverflowing = func() {
mu.Lock()
defer mu.Unlock()
stoppedBeforeOverflow = true
}
defer func() {
testHookSawTruncate = nil
testHookStopBeforeOverflowing = nil
}()
// generatesTwoZips reports whether packing a file whose third chunk has
// sizeC bytes produces two zip files; used as the binary-search predicate.
generatesTwoZips := func(sizeC int) (ret bool) {
large := new(test.Fetcher)
s := &storage{
small: new(test.Fetcher),
large: large,
meta: sorted.NewMemoryKeyValue(),
log: test.NewLogger(t, "blobpacked: ",
// Ignore these phrases:
"Packing file ",
"Packed file ",
),
}
s.init()
// Upload first two chunks
blobA.MustUpload(t, s)
blobB.MustUpload(t, s)
// Upload second chunk
bytesC := bytesCFull[:sizeC]
h := blob.NewHash()
h.Write(bytesC)
refC := blob.RefFromHash(h)
_, err := s.ReceiveBlob(refC, bytes.NewReader(bytesC))
if err != nil {
t.Fatal(err)
}
// Upload the file schema blob.
m := schema.NewFileMap("foo.dat")
m.PopulateParts(sizeAB+int64(sizeC), []schema.BytesPart{
schema.BytesPart{
Size: sizeAB / 2,
BlobRef: refA,
},
schema.BytesPart{
Size: sizeAB / 2,
BlobRef: refB,
},
schema.BytesPart{
Size: uint64(sizeC),
BlobRef: refC,
},
})
fjson, err := m.JSON()
if err != nil {
t.Fatalf("schema filemap JSON: %v", err)
}
fb := &test.Blob{Contents: fjson}
//.........这里部分代码省略.........