本文整理匯總了Golang中camlistore/org/pkg/schema.Blob.NewFileReader方法的典型用法代碼示例。如果您正苦於以下問題:Golang Blob.NewFileReader方法的具體用法?Golang Blob.NewFileReader怎麽用?Golang Blob.NewFileReader使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類camlistore/org/pkg/schema.Blob
的用法示例。
在下文中一共展示了Blob.NewFileReader方法的7個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Golang代碼示例。
示例1: readPrefixOrFile
// readPrefixOrFile runs fn with a reader over the in-memory prefix bytes.
// If fn reports that the prefix was too short (io.EOF or
// io.ErrUnexpectedEOF), it retries fn with a reader over the whole file,
// opened from fetcher via b. Any other error from the first attempt is
// returned as-is.
func readPrefixOrFile(prefix []byte, fetcher blob.Fetcher, b *schema.Blob, fn func(filePrefixReader) error) (err error) {
	err = fn(bytes.NewReader(prefix))
	if err != io.EOF && err != io.ErrUnexpectedEOF {
		// Either success or a non-truncation failure; no retry.
		return err
	}
	// Prefix was too short: fall back to reading the entire file.
	var fr *schema.FileReader
	fr, err = b.NewFileReader(fetcher)
	if err != nil {
		return err
	}
	err = fn(fr)
	fr.Close()
	return err
}
示例2: populateFile
// populateFile indexes the contents of the file described by the parsed
// file schema blob b, writing all resulting index rows into mm. The file's
// bytes are fetched through fetcher. It records the whole-file SHA-1 ref,
// file info (size, name, MIME type), file times, image dimensions, EXIF
// data, and music metadata for audio files.
//
// b: the parsed file schema blob
// mm: keys to populate
func (ix *Index) populateFile(fetcher blob.Fetcher, b *schema.Blob, mm *mutationMap) (err error) {
	var times []time.Time // all creation or mod times seen; may be zero
	times = append(times, b.ModTime())
	blobRef := b.BlobRef()
	fr, err := b.NewFileReader(fetcher)
	if err != nil {
		return err
	}
	defer fr.Close()
	// Sniff the MIME type from the file's first bytes; reader replays the
	// sniffed bytes followed by the remainder of fr.
	mime, reader := magic.MIMETypeFromReader(fr)
	sha1 := sha1.New() // NOTE: local shadows the sha1 package in this function
	var copyDest io.Writer = sha1
	var imageBuf *keepFirstN // or nil
	if strings.HasPrefix(mime, "image/") {
		// Keep the first 512KB in memory for the image probes below; the
		// EOF-retry branches further down re-read the whole file when this
		// optimistic prefix turns out to be too short.
		imageBuf = &keepFirstN{N: 512 << 10}
		copyDest = io.MultiWriter(copyDest, imageBuf)
	}
	// Hash the whole file (teeing the prefix into imageBuf, if set).
	size, err := io.Copy(copyDest, reader)
	if err != nil {
		return err
	}
	wholeRef := blob.RefFromHash(sha1)
	if imageBuf != nil {
		conf, err := images.DecodeConfig(bytes.NewReader(imageBuf.Bytes))
		// If our optimistic 512KB in-memory prefix from above was too short to get the dimensions, pass the whole thing instead and try again.
		if err == io.ErrUnexpectedEOF {
			var fr *schema.FileReader
			fr, err = b.NewFileReader(fetcher)
			if err == nil {
				conf, err = images.DecodeConfig(fr)
				fr.Close()
			}
		}
		if err == nil {
			mm.Set(keyImageSize.Key(blobRef), keyImageSize.Val(fmt.Sprint(conf.Width), fmt.Sprint(conf.Height)))
		}
		// Record the EXIF file time, if present. Note both branches emit the
		// same debug log line; only the success branch records the time.
		if ft, err := schema.FileTime(bytes.NewReader(imageBuf.Bytes)); err == nil {
			log.Printf("filename %q exif = %v, %v", b.FileName(), ft, err)
			times = append(times, ft)
		} else {
			log.Printf("filename %q exif = %v, %v", b.FileName(), ft, err)
		}
		// TODO(mpl): find (generate?) more broken EXIF images to experiment with.
		err = indexEXIF(wholeRef, bytes.NewReader(imageBuf.Bytes), mm)
		if err == io.EOF {
			// Prefix too short for the EXIF parser as well; retry on the
			// full file.
			var fr *schema.FileReader
			fr, err = b.NewFileReader(fetcher)
			if err == nil {
				err = indexEXIF(wholeRef, fr, mm)
				fr.Close()
			}
		}
		if err != nil {
			log.Printf("error parsing EXIF: %v", err)
		}
	}
	// Drop zero times, sort the rest, and encode as "t" (one time) or
	// "oldest,newest" (two or more).
	var sortTimes []time.Time
	for _, t := range times {
		if !t.IsZero() {
			sortTimes = append(sortTimes, t)
		}
	}
	sort.Sort(types.ByTime(sortTimes))
	var time3339s string
	switch {
	case len(sortTimes) == 1:
		time3339s = types.Time3339(sortTimes[0]).String()
	case len(sortTimes) >= 2:
		oldest, newest := sortTimes[0], sortTimes[len(sortTimes)-1]
		time3339s = types.Time3339(oldest).String() + "," + types.Time3339(newest).String()
	}
	mm.Set(keyWholeToFileRef.Key(wholeRef, blobRef), "1")
	mm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(size, b.FileName(), mime, wholeRef))
	mm.Set(keyFileTimes.Key(blobRef), keyFileTimes.Val(time3339s))
	if strings.HasPrefix(mime, "audio/") {
		// fr was fully consumed by the io.Copy above; io.NewSectionReader
		// requires fr to implement io.ReaderAt, so this re-reads from the start.
		indexMusic(io.NewSectionReader(fr, 0, fr.Size()), wholeRef, mm)
	}
	return nil
}
示例3: populateFile
// populateFile indexes the contents of the file whose parsed schema blob is
// b, fetching its bytes through fetcher and writing the resulting index
// rows into mm: whole-file ref, file info, file times, and (for images)
// dimensions plus EXIF keys.
//
// b: the parsed file schema blob
// mm: keys to populate
func (ix *Index) populateFile(fetcher blob.Fetcher, b *schema.Blob, mm *mutationMap) (err error) {
	// All creation or modification times seen; entries may be the zero time.
	seen := []time.Time{b.ModTime()}
	blobRef := b.BlobRef()
	fr, err := b.NewFileReader(fetcher)
	if err != nil {
		return err
	}
	defer fr.Close()
	mime, body := magic.MIMETypeFromReader(fr)
	hasher := sha1.New()
	var dst io.Writer = hasher
	var imgPrefix *keepFirstN // non-nil only for image MIME types
	if strings.HasPrefix(mime, "image/") {
		// Empirically derived 1MiB assuming CR2 images require more than any
		// other filetype we support:
		// https://gist.github.com/wathiede/7982372
		imgPrefix = &keepFirstN{N: 1 << 20}
		dst = io.MultiWriter(dst, imgPrefix)
	}
	size, err := io.Copy(dst, body)
	if err != nil {
		return err
	}
	wholeRef := blob.RefFromHash(hasher)
	if imgPrefix != nil {
		if conf, err := images.DecodeConfig(bytes.NewReader(imgPrefix.Bytes)); err == nil {
			mm.Set(keyImageSize.Key(blobRef), keyImageSize.Val(fmt.Sprint(conf.Width), fmt.Sprint(conf.Height)))
		}
		ft, err := schema.FileTime(bytes.NewReader(imgPrefix.Bytes))
		log.Printf("filename %q exif = %v, %v", b.FileName(), ft, err)
		if err == nil {
			seen = append(seen, ft)
		}
		indexEXIF(wholeRef, imgPrefix.Bytes, mm)
	}
	// Keep only non-zero times, sorted ascending.
	valid := make([]time.Time, 0, len(seen))
	for _, t := range seen {
		if !t.IsZero() {
			valid = append(valid, t)
		}
	}
	sort.Sort(types.ByTime(valid))
	// Encode one time as "t", two or more as "oldest,newest".
	var time3339s string
	if n := len(valid); n == 1 {
		time3339s = types.Time3339(valid[0]).String()
	} else if n >= 2 {
		time3339s = types.Time3339(valid[0]).String() + "," + types.Time3339(valid[n-1]).String()
	}
	mm.Set(keyWholeToFileRef.Key(wholeRef, blobRef), "1")
	mm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(size, b.FileName(), mime))
	mm.Set(keyFileTimes.Key(blobRef), keyFileTimes.Val(time3339s))
	if strings.HasPrefix(mime, "audio/") {
		// fr was consumed by the copy above; the SectionReader re-reads it
		// from offset 0 via ReadAt.
		indexMusic(io.NewSectionReader(fr, 0, fr.Size()), wholeRef, mm)
	}
	return nil
}
示例4: populateFile
// populateFile indexes the contents of the file described by the parsed
// file schema blob b into the batch mutation bm. Fetch or read failures
// are logged and swallowed (nil return) by design — see the TODOs below.
//
// blobref: of the file or schema blob
// blob: the parsed file schema blob
// bm: keys to populate
func (ix *Index) populateFile(b *schema.Blob, bm BatchMutation) error {
	var times []time.Time // all creation or mod times seen; may be zero
	times = append(times, b.ModTime())
	blobRef := b.BlobRef()
	// Adapt the index's streaming blob source into the seekable fetcher
	// that NewFileReader requires.
	seekFetcher := blob.SeekerFromStreamingFetcher(ix.BlobSource)
	fr, err := b.NewFileReader(seekFetcher)
	if err != nil {
		// TODO(bradfitz): propagate up a transient failure
		// error type, so we can retry indexing files in the
		// future if blobs are only temporarily unavailable.
		// Basically the same as the TODO just below.
		log.Printf("index: error indexing file, creating NewFileReader %s: %v", blobRef, err)
		return nil
	}
	defer fr.Close()
	// Sniff the MIME type; reader replays the sniffed bytes plus the rest
	// of fr.
	mime, reader := magic.MIMETypeFromReader(fr)
	sha1 := sha1.New() // NOTE: local shadows the sha1 package in this function
	var copyDest io.Writer = sha1
	var imageBuf *keepFirstN // or nil
	if strings.HasPrefix(mime, "image/") {
		// Keep the first 256KB in memory for the image dimension/EXIF-time
		// probes below.
		imageBuf = &keepFirstN{N: 256 << 10}
		copyDest = io.MultiWriter(copyDest, imageBuf)
	}
	// Hash the whole file (teeing the prefix into imageBuf, if set).
	size, err := io.Copy(copyDest, reader)
	if err != nil {
		// TODO: job scheduling system to retry this spaced
		// out max n times. Right now our options are
		// ignoring this error (forever) or returning the
		// error and making the indexing try again (likely
		// forever failing). Both options suck. For now just
		// log and act like all's okay.
		log.Printf("index: error indexing file %s: %v", blobRef, err)
		return nil
	}
	if imageBuf != nil {
		if conf, err := images.DecodeConfig(bytes.NewReader(imageBuf.Bytes)); err == nil {
			bm.Set(keyImageSize.Key(blobRef), keyImageSize.Val(fmt.Sprint(conf.Width), fmt.Sprint(conf.Height)))
		}
		// Record the EXIF file time, if present. Both branches emit the same
		// debug log line; only the success branch records the time.
		if ft, err := schema.FileTime(bytes.NewReader(imageBuf.Bytes)); err == nil {
			log.Printf("filename %q exif = %v, %v", b.FileName(), ft, err)
			times = append(times, ft)
		} else {
			log.Printf("filename %q exif = %v, %v", b.FileName(), ft, err)
		}
	}
	// Drop zero times, sort the rest, and encode as "t" (one time) or
	// "oldest,newest" (two or more).
	var sortTimes []time.Time
	for _, t := range times {
		if !t.IsZero() {
			sortTimes = append(sortTimes, t)
		}
	}
	sort.Sort(types.ByTime(sortTimes))
	var time3339s string
	switch {
	case len(sortTimes) == 1:
		time3339s = types.Time3339(sortTimes[0]).String()
	case len(sortTimes) >= 2:
		oldest, newest := sortTimes[0], sortTimes[len(sortTimes)-1]
		time3339s = types.Time3339(oldest).String() + "," + types.Time3339(newest).String()
	}
	wholeRef := blob.RefFromHash(sha1)
	bm.Set(keyWholeToFileRef.Key(wholeRef, blobRef), "1")
	bm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(size, b.FileName(), mime))
	bm.Set(keyFileTimes.Key(blobRef), keyFileTimes.Val(time3339s))
	if strings.HasPrefix(mime, "audio/") {
		// fr was consumed by the copy above; taglib re-reads it (fr is
		// seekable, per its use with fr.Size()).
		tag, err := taglib.Decode(fr, fr.Size())
		if err == nil {
			indexMusic(tag, wholeRef, bm)
		} else {
			log.Print("index: error parsing tag: ", err)
		}
	}
	return nil
}
示例5: populateFile
// b: the parsed file schema blob
// mm: keys to populate
func (ix *Index) populateFile(b *schema.Blob, mm *mutationMap) (err error) {
var times []time.Time // all creation or mod times seen; may be zero
times = append(times, b.ModTime())
blobRef := b.BlobRef()
fetcher := &seekFetcherMissTracker{
// TODO(bradfitz): cache this SeekFetcher on ix so it
// doesn't have to be re-made each time? Probably small.
src: blob.SeekerFromStreamingFetcher(ix.BlobSource),
}
defer func() {
if err == nil {
return
}
fetcher.mu.Lock()
defer fetcher.mu.Unlock()
if len(fetcher.missing) == 0 {
return
}
// TODO(bradfitz): there was an error indexing this file, and
// we failed to load the blobs in f.missing. Add those as dependencies
// somewhere so when we get one of those missing blobs, we kick off
// a re-index of this file for whenever the indexer is idle.
}()
fr, err := b.NewFileReader(fetcher)
if err != nil {
// TODO(bradfitz): propagate up a transient failure
// error type, so we can retry indexing files in the
// future if blobs are only temporarily unavailable.
// Basically the same as the TODO just below.
//
// We'll also want to bump the schemaVersion after this,
// to fix anybody's index which is only partial due to
// this old bug where it would return nil instead of doing
// the necessary work.
log.Printf("index: error indexing file, creating NewFileReader %s: %v", blobRef, err)
return nil
}
defer fr.Close()
mime, reader := magic.MIMETypeFromReader(fr)
sha1 := sha1.New()
var copyDest io.Writer = sha1
var imageBuf *keepFirstN // or nil
if strings.HasPrefix(mime, "image/") {
// Emperically derived 1MiB assuming CR2 images require more than any
// other filetype we support:
// https://gist.github.com/wathiede/7982372
imageBuf = &keepFirstN{N: 1 << 20}
copyDest = io.MultiWriter(copyDest, imageBuf)
}
size, err := io.Copy(copyDest, reader)
if err != nil {
// TODO: job scheduling system to retry this spaced
// out max n times. Right now our options are
// ignoring this error (forever) or returning the
// error and making the indexing try again (likely
// forever failing). Both options suck. For now just
// log and act like all's okay.
//
// See TODOs above, and the fetcher.missing stuff.
log.Printf("index: error indexing file %s: %v", blobRef, err)
return nil
}
wholeRef := blob.RefFromHash(sha1)
if imageBuf != nil {
if conf, err := images.DecodeConfig(bytes.NewReader(imageBuf.Bytes)); err == nil {
mm.Set(keyImageSize.Key(blobRef), keyImageSize.Val(fmt.Sprint(conf.Width), fmt.Sprint(conf.Height)))
}
if ft, err := schema.FileTime(bytes.NewReader(imageBuf.Bytes)); err == nil {
log.Printf("filename %q exif = %v, %v", b.FileName(), ft, err)
times = append(times, ft)
} else {
log.Printf("filename %q exif = %v, %v", b.FileName(), ft, err)
}
indexEXIF(wholeRef, imageBuf.Bytes, mm)
}
var sortTimes []time.Time
for _, t := range times {
if !t.IsZero() {
sortTimes = append(sortTimes, t)
}
}
sort.Sort(types.ByTime(sortTimes))
var time3339s string
switch {
case len(sortTimes) == 1:
time3339s = types.Time3339(sortTimes[0]).String()
case len(sortTimes) >= 2:
oldest, newest := sortTimes[0], sortTimes[len(sortTimes)-1]
time3339s = types.Time3339(oldest).String() + "," + types.Time3339(newest).String()
}
mm.Set(keyWholeToFileRef.Key(wholeRef, blobRef), "1")
mm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(size, b.FileName(), mime))
//.........這裏部分代碼省略.........
示例6: populateFile
// populateFile indexes the contents of the file described by the parsed
// file schema blob b into mm, fetching bytes through fetcher. Image probes
// (dimensions, EXIF time, EXIF tags) each run through readPrefixOrFile,
// which retries on the whole file when the buffered prefix is too short.
//
// b: the parsed file schema blob
// mm: keys to populate
func (ix *Index) populateFile(fetcher blob.Fetcher, b *schema.Blob, mm *mutationMap) (err error) {
	var times []time.Time // all creation or mod times seen; may be zero
	times = append(times, b.ModTime())
	blobRef := b.BlobRef()
	fr, err := b.NewFileReader(fetcher)
	if err != nil {
		return err
	}
	defer fr.Close()
	// Sniff the MIME type; mr replays the sniffed bytes plus the rest of fr.
	mime, mr := magic.MIMETypeFromReader(fr)
	sha1 := sha1.New() // NOTE: local shadows the sha1 package in this function
	var copyDest io.Writer = sha1
	var imageBuf *keepFirstN // or nil
	if strings.HasPrefix(mime, "image/") {
		// Keep the first 512KB in memory for the image probes below.
		imageBuf = &keepFirstN{N: 512 << 10}
		copyDest = io.MultiWriter(copyDest, imageBuf)
	}
	// Hash the whole file (teeing the prefix into imageBuf, if set).
	size, err := io.Copy(copyDest, mr)
	if err != nil {
		return err
	}
	wholeRef := blob.RefFromHash(sha1)
	if imageBuf != nil {
		// NOTE: each closure below assigns to this function's named return
		// err in addition to returning it; the value checked after each
		// readPrefixOrFile call is that same err.
		var conf images.Config
		decodeConfig := func(r filePrefixReader) error {
			conf, err = images.DecodeConfig(r)
			return err
		}
		if err := readPrefixOrFile(imageBuf.Bytes, fetcher, b, decodeConfig); err == nil {
			mm.Set(keyImageSize.Key(blobRef), keyImageSize.Val(fmt.Sprint(conf.Width), fmt.Sprint(conf.Height)))
		}
		var ft time.Time
		fileTime := func(r filePrefixReader) error {
			ft, err = schema.FileTime(r)
			return err
		}
		if err = readPrefixOrFile(imageBuf.Bytes, fetcher, b, fileTime); err == nil {
			times = append(times, ft)
		}
		if exifDebug {
			log.Printf("filename %q exif = %v, %v", b.FileName(), ft, err)
		}
		// TODO(mpl): find (generate?) more broken EXIF images to experiment with.
		indexEXIFData := func(r filePrefixReader) error {
			return indexEXIF(wholeRef, r, mm)
		}
		if err = readPrefixOrFile(imageBuf.Bytes, fetcher, b, indexEXIFData); err != nil {
			if exifDebug {
				log.Printf("error parsing EXIF: %v", err)
			}
		}
	}
	// Drop zero times, sort the rest, and encode as "t" (one time) or
	// "oldest,newest" (two or more).
	var sortTimes []time.Time
	for _, t := range times {
		if !t.IsZero() {
			sortTimes = append(sortTimes, t)
		}
	}
	sort.Sort(types.ByTime(sortTimes))
	var time3339s string
	switch {
	case len(sortTimes) == 1:
		time3339s = types.Time3339(sortTimes[0]).String()
	case len(sortTimes) >= 2:
		oldest, newest := sortTimes[0], sortTimes[len(sortTimes)-1]
		time3339s = types.Time3339(oldest).String() + "," + types.Time3339(newest).String()
	}
	mm.Set(keyWholeToFileRef.Key(wholeRef, blobRef), "1")
	mm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(size, b.FileName(), mime, wholeRef))
	mm.Set(keyFileTimes.Key(blobRef), keyFileTimes.Val(time3339s))
	if strings.HasPrefix(mime, "audio/") {
		// fr was consumed by the copy above; the SectionReader re-reads it
		// from offset 0 via ReadAt.
		indexMusic(io.NewSectionReader(fr, 0, fr.Size()), wholeRef, mm)
	}
	return nil
}
示例7: populateFile
// populateFile indexes the contents of the file described by the parsed
// file schema blob into bm: whole-file ref, file info, and (for images)
// dimensions decoded concurrently from a pipe while the file is hashed.
// Fetch or read failures are logged and swallowed (nil return) — see TODOs.
//
// blobref: of the file or schema blob
// ss: the parsed file schema blob
// bm: keys to populate
func (ix *Index) populateFile(blob *schema.Blob, bm BatchMutation) error {
	// TODO: move the NewFileReader off of blob.
	// NOTE(review): the parameter name "blob" shadows any blob package
	// within this function; refs here come from the blobref package.
	blobRef := blob.BlobRef()
	seekFetcher := blobref.SeekerFromStreamingFetcher(ix.BlobSource)
	fr, err := blob.NewFileReader(seekFetcher)
	if err != nil {
		// TODO(bradfitz): propagate up a transient failure
		// error type, so we can retry indexing files in the
		// future if blobs are only temporarily unavailable.
		// Basically the same as the TODO just below.
		log.Printf("index: error indexing file, creating NewFileReader %s: %v", blobRef, err)
		return nil
	}
	defer fr.Close()
	// Sniff the MIME type; reader replays the sniffed bytes plus the rest
	// of fr.
	mime, reader := magic.MimeTypeFromReader(fr)
	sha1 := sha1.New() // NOTE: local shadows the sha1 package in this function
	var copyDest io.Writer = sha1
	var withCopyErr func(error) // or nil
	if strings.HasPrefix(mime, "image/") {
		// Tee the file bytes into a pipe consumed by a goroutine that
		// decodes just the image config (dimensions) while we hash.
		pr, pw := io.Pipe()
		copyDest = io.MultiWriter(copyDest, pw)
		confc := make(chan *image.Config, 1)
		go func() {
			conf, _, err := image.DecodeConfig(pr)
			// Drain the remaining pipe bytes so the writing side of the
			// MultiWriter never blocks.
			defer io.Copy(ioutil.Discard, pr)
			if err == nil {
				confc <- &conf
			} else {
				confc <- nil
			}
		}()
		withCopyErr = func(err error) {
			// Close the write end (propagating any copy error), then wait
			// for the decoder goroutine's result.
			pw.CloseWithError(err)
			if conf := <-confc; conf != nil {
				bm.Set(keyImageSize.Key(blobRef), keyImageSize.Val(fmt.Sprint(conf.Width), fmt.Sprint(conf.Height)))
			}
		}
	}
	size, err := io.Copy(copyDest, reader)
	// Always run the image finalizer (even on copy error) so the decoder
	// goroutine is unblocked and its result is collected.
	if f := withCopyErr; f != nil {
		f(err)
	}
	if err != nil {
		// TODO: job scheduling system to retry this spaced
		// out max n times. Right now our options are
		// ignoring this error (forever) or returning the
		// error and making the indexing try again (likely
		// forever failing). Both options suck. For now just
		// log and act like all's okay.
		log.Printf("index: error indexing file %s: %v", blobRef, err)
		return nil
	}
	wholeRef := blobref.FromHash(sha1)
	bm.Set(keyWholeToFileRef.Key(wholeRef, blobRef), "1")
	bm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(size, blob.FileName(), mime))
	return nil
}