本文整理匯總了Golang中camlistore/org/pkg/types.Time3339函數的典型用法代碼示例。如果您正苦於以下問題:Golang Time3339函數的具體用法?Golang Time3339怎麽用?Golang Time3339使用的例子?那麽, 這裏精選的函數代碼示例或許可以為您提供幫助。
在下文中一共展示了Time3339函數的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Golang代碼示例。
示例1: TestQueryPermanodeModtime
// TestQueryPermanodeModtime verifies that a ModTime TimeConstraint
// selects only permanodes modified strictly inside the (After, Before)
// window.
func TestQueryPermanodeModtime(t *testing.T) {
	testQuery(t, func(qt *queryTest) {
		idx := qt.id
		// indextest advances time one second per operation:
		pn1 := idx.NewPlannedPermanode("1")
		pn2 := idx.NewPlannedPermanode("2")
		pn3 := idx.NewPlannedPermanode("3")
		idx.SetAttribute(pn1, "someAttr", "value1") // 2011-11-28 01:32:37.000123456 +0000 UTC 1322443957
		idx.SetAttribute(pn2, "someAttr", "value2") // 2011-11-28 01:32:38.000123456 +0000 UTC 1322443958
		idx.SetAttribute(pn3, "someAttr", "value3") // 2011-11-28 01:32:39.000123456 +0000 UTC 1322443959

		// Only pn2 falls inside the window: After excludes pn1's
		// timestamp, Before excludes pn3's.
		query := &SearchQuery{
			Constraint: &Constraint{
				Permanode: &PermanodeConstraint{
					ModTime: &TimeConstraint{
						After:  types.Time3339(time.Unix(1322443957, 456789)),
						Before: types.Time3339(time.Unix(1322443959, 0)),
					},
				},
			},
		}
		qt.wantRes(query, pn2)
	})
}
示例2: RunCommand
// RunCommand parses each argument as a blobref, asks the server to
// describe them all, and writes the indented JSON response to stdout.
func (c *desCmd) RunCommand(args []string) error {
	if len(args) == 0 {
		return cmdmain.UsageError("requires blobref")
	}
	blobs := make([]blob.Ref, 0, len(args))
	for _, arg := range args {
		br, ok := blob.Parse(arg)
		if !ok {
			return cmdmain.UsageError(fmt.Sprintf("invalid blobref %q", arg))
		}
		blobs = append(blobs, br)
	}
	var at time.Time // TODO: implement. from "2 days ago" "-2d", "-2h", "2013-02-05", etc
	res, err := c.client().Describe(&search.DescribeRequest{
		BlobRefs: blobs,
		Depth:    c.depth,
		At:       types.Time3339(at),
	})
	if err != nil {
		return err
	}
	out, err := json.MarshalIndent(res, "", " ")
	if err != nil {
		return err
	}
	out = append(out, '\n')
	_, err = os.Stdout.Write(out)
	return err
}
示例3: GetClaims
// GetClaims returns the claims on req.Permanode signed by sh.owner.
func (sh *Handler) GetClaims(req *ClaimsRequest) (*ClaimsResponse, error) {
	// TODO: rename GetOwnerClaims to GetClaims?
	if req.Permanode == nil {
		return nil, errors.New("Error getting claims: nil permanode.")
	}
	claims, err := sh.index.GetOwnerClaims(req.Permanode, sh.owner)
	if err != nil {
		return nil, fmt.Errorf("Error getting claims of %s: %v", req.Permanode.String(), err)
	}
	sort.Sort(claims)
	// Keep jclaims nil when there are no claims so the JSON shape of the
	// response is unchanged.
	var jclaims []*ClaimsItem
	for _, cl := range claims {
		jclaims = append(jclaims, &ClaimsItem{
			BlobRef:   cl.BlobRef,
			Signer:    cl.Signer,
			Permanode: cl.Permanode,
			Date:      types.Time3339(cl.Date),
			Type:      cl.Type,
			Attr:      cl.Attr,
			Value:     cl.Value,
		})
	}
	return &ClaimsResponse{Claims: jclaims}, nil
}
示例4: GetRecentPermanodes
// GetRecentPermanodes returns recently-modified permanodes.
func (sh *Handler) GetRecentPermanodes(req *RecentRequest) (*RecentResponse, error) {
	results := make(chan *Result)
	errc := make(chan error)
	go func() {
		errc <- sh.index.GetRecentPermanodes(results, sh.owner, req.n())
	}()
	dr := sh.NewDescribeRequest()
	var recent []*RecentItem
	for r := range results {
		dr.Describe(r.BlobRef, 2)
		recent = append(recent, &RecentItem{
			BlobRef: r.BlobRef,
			Owner:   r.Signer,
			ModTime: types.Time3339(time.Unix(r.LastModTime, 0)),
		})
		testHookBug121() // http://camlistore.org/issue/121
	}
	// The index goroutine closes the channel before sending its error.
	if err := <-errc; err != nil {
		return nil, err
	}
	metaMap, err := dr.metaMapThumbs(req.thumbnailSize())
	if err != nil {
		return nil, err
	}
	return &RecentResponse{
		Recent: recent,
		Meta:   metaMap,
	}, nil
}
示例5: GetClaims
// GetClaims returns the claims on req.Permanode signed by sh.owner.
func (sh *Handler) GetClaims(req *ClaimsRequest) (*ClaimsResponse, error) {
	if !req.Permanode.Valid() {
		return nil, errors.New("Error getting claims: nil permanode.")
	}
	var claims []camtypes.Claim
	claims, err := sh.index.AppendClaims(claims, req.Permanode, sh.owner, req.AttrFilter)
	if err != nil {
		return nil, fmt.Errorf("Error getting claims of %s: %v", req.Permanode.String(), err)
	}
	sort.Sort(camtypes.ClaimsByDate(claims))
	// Keep jclaims nil when there are no claims so the JSON shape of the
	// response is unchanged.
	var jclaims []*ClaimsItem
	for _, cl := range claims {
		jclaims = append(jclaims, &ClaimsItem{
			BlobRef:   cl.BlobRef,
			Signer:    cl.Signer,
			Permanode: cl.Permanode,
			Date:      types.Time3339(cl.Date),
			Type:      cl.Type,
			Attr:      cl.Attr,
			Value:     cl.Value,
		})
	}
	return &ClaimsResponse{Claims: jclaims}, nil
}
示例6: URLSuffix
// URLSuffix returns the path-and-query suffix for this recent-items
// request, including the optional "before" cutoff in RFC 3339 form.
func (r *RecentRequest) URLSuffix() string {
	s := fmt.Sprintf("camli/search/recent?n=%d&thumbnails=%d", r.n(), r.thumbnailSize())
	if !r.Before.IsZero() {
		s += fmt.Sprintf("&before=%s", types.Time3339(r.Before))
	}
	return s
}
示例7: Predicate
// Predicate builds a permanode constraint matching nodes whose time is
// before the timestamp parsed from args[0].
func (b before) Predicate(ctx context.Context, args []string) (*Constraint, error) {
	t, err := parseTimePrefix(args[0])
	if err != nil {
		return nil, err
	}
	return &Constraint{
		Permanode: &PermanodeConstraint{
			Time: &TimeConstraint{
				Before: types.Time3339(t),
			},
		},
	}, nil
}
示例8: GetRecentPermanodes
// GetRecentPermanodes returns recently-modified permanodes.
func (sh *Handler) GetRecentPermanodes(req *RecentRequest) (*RecentResponse, error) {
	results := make(chan camtypes.RecentPermanode)
	errc := make(chan error, 1)
	// Default the cutoff to "now" when the request doesn't set one.
	before := req.Before
	if before.IsZero() {
		before = time.Now()
	}
	go func() {
		errc <- sh.index.GetRecentPermanodes(results, sh.owner, req.n(), before)
	}()
	dr := sh.NewDescribeRequest()
	var recent []*RecentItem
	for r := range results {
		dr.Describe(r.Permanode, 2)
		recent = append(recent, &RecentItem{
			BlobRef: r.Permanode,
			Owner:   r.Signer,
			ModTime: types.Time3339(r.LastModTime),
		})
		testHookBug121() // http://camlistore.org/issue/121
	}
	// The index goroutine closes the channel before sending its error.
	if err := <-errc; err != nil {
		return nil, err
	}
	metaMap, err := dr.metaMap()
	if err != nil {
		return nil, err
	}
	return &RecentResponse{
		Recent: recent,
		Meta:   metaMap,
	}, nil
}
示例9: serveDiscovery
// serveDiscovery writes the server's discovery document: the configured
// handler roots, auth token, thumbnail version, storage generation, and
// per-subsystem discovery blobs.
func (rh *RootHandler) serveDiscovery(rw http.ResponseWriter, req *http.Request) {
	disco := &camtypes.Discovery{
		BlobRoot:     rh.BlobRoot,
		JSONSignRoot: rh.JSONSignRoot,
		HelpRoot:     rh.helpRoot,
		ImporterRoot: rh.importerRoot,
		SearchRoot:   rh.SearchRoot,
		StatusRoot:   rh.statusRoot,
		OwnerName:    rh.OwnerName,
		UserName:     rh.Username,
		WSAuthToken:  auth.ProcessRandom(),
		ThumbVersion: images.ThumbnailVersion(),
	}
	gener, ok := rh.Storage.(blobserver.Generationer)
	switch {
	case !ok:
		log.Printf("Storage type %T is not a blobserver.Generationer; not sending storageGeneration", rh.Storage)
	default:
		initTime, gen, err := gener.StorageGeneration()
		if err != nil {
			disco.StorageGenerationError = err.Error()
		} else {
			disco.StorageInitTime = types.Time3339(initTime)
			disco.StorageGeneration = gen
		}
	}
	if rh.ui != nil {
		disco.UIDiscovery = rh.ui.discovery()
	}
	if rh.sigh != nil {
		disco.Signing = rh.sigh.Discovery(rh.JSONSignRoot)
	}
	if n := len(rh.sync); n > 0 {
		handlers := make([]camtypes.SyncHandlerDiscovery, 0, n)
		for _, s := range rh.sync {
			handlers = append(handlers, s.discovery())
		}
		disco.SyncHandlers = handlers
	}
	discoveryHelper(rw, req, disco)
}
示例10: populateFile
// populateFile indexes one parsed file schema blob: it streams the file's
// contents once to compute the whole-file hash, extracts image metadata
// (dimensions, EXIF, embedded file time) and audio tags where applicable,
// and records the resulting rows in mm.
//
// fetcher: used to load the file's data chunks
// b: the parsed file schema blob
// mm: keys to populate
func (ix *Index) populateFile(fetcher blob.Fetcher, b *schema.Blob, mm *mutationMap) (err error) {
var times []time.Time // all creation or mod times seen; may be zero
times = append(times, b.ModTime())
blobRef := b.BlobRef()
fr, err := b.NewFileReader(fetcher)
if err != nil {
return err
}
defer fr.Close()
// Sniff the MIME type from the file's leading bytes; reader is then used
// for the full-content copy below (presumably it replays the sniffed
// prefix, since size is treated as the whole file's size — TODO confirm).
mime, reader := magic.MIMETypeFromReader(fr)
sha1 := sha1.New()
var copyDest io.Writer = sha1
var imageBuf *keepFirstN // or nil
if strings.HasPrefix(mime, "image/") {
// Empirically derived 1MiB assuming CR2 images require more than any
// other filetype we support:
// https://gist.github.com/wathiede/7982372
imageBuf = &keepFirstN{N: 1 << 20}
copyDest = io.MultiWriter(copyDest, imageBuf)
}
// Single pass over the file: hash everything, and for images also tee the
// first N bytes into imageBuf for the metadata extraction below.
size, err := io.Copy(copyDest, reader)
if err != nil {
return err
}
wholeRef := blob.RefFromHash(sha1)
if imageBuf != nil {
// Best-effort image dimensions; failures are silently skipped.
if conf, err := images.DecodeConfig(bytes.NewReader(imageBuf.Bytes)); err == nil {
mm.Set(keyImageSize.Key(blobRef), keyImageSize.Val(fmt.Sprint(conf.Width), fmt.Sprint(conf.Height)))
}
// An embedded (EXIF) file time, if present, joins the candidate times.
if ft, err := schema.FileTime(bytes.NewReader(imageBuf.Bytes)); err == nil {
log.Printf("filename %q exif = %v, %v", b.FileName(), ft, err)
times = append(times, ft)
} else {
log.Printf("filename %q exif = %v, %v", b.FileName(), ft, err)
}
indexEXIF(wholeRef, imageBuf.Bytes, mm)
}
// Drop zero times, sort the rest, and encode as "oldest" or
// "oldest,newest" in RFC 3339 form for the file-times row.
var sortTimes []time.Time
for _, t := range times {
if !t.IsZero() {
sortTimes = append(sortTimes, t)
}
}
sort.Sort(types.ByTime(sortTimes))
var time3339s string
switch {
case len(sortTimes) == 1:
time3339s = types.Time3339(sortTimes[0]).String()
case len(sortTimes) >= 2:
oldest, newest := sortTimes[0], sortTimes[len(sortTimes)-1]
time3339s = types.Time3339(oldest).String() + "," + types.Time3339(newest).String()
}
mm.Set(keyWholeToFileRef.Key(wholeRef, blobRef), "1")
mm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(size, b.FileName(), mime))
mm.Set(keyFileTimes.Key(blobRef), keyFileTimes.Val(time3339s))
if strings.HasPrefix(mime, "audio/") {
// Re-read the file from the start for tag parsing; fr is seekable here.
indexMusic(io.NewSectionReader(fr, 0, fr.Size()), wholeRef, mm)
}
return nil
}
示例11: parseCoreAtom
// parseCoreAtom parses one core search atom — tag:, title:, attr:,
// childrenof:, before:/after:, or format: — into a Constraint.
// It returns an error if word is not a recognized core atom, or if a
// before:/after: timestamp fails to parse.
func parseCoreAtom(ctx *context.Context, word string) (*Constraint, error) {
	if m := tagExpr.FindStringSubmatch(word); m != nil {
		// tag:foo — exact match on the "tag" attribute.
		return &Constraint{
			Permanode: &PermanodeConstraint{
				Attr:       "tag",
				SkipHidden: true,
				Value:      m[1],
			},
		}, nil
	}
	if m := titleExpr.FindStringSubmatch(word); m != nil {
		// title:foo — case-insensitive substring match on the title.
		return &Constraint{
			Permanode: &PermanodeConstraint{
				Attr:       "title",
				SkipHidden: true,
				ValueMatches: &StringConstraint{
					Contains:        m[1],
					CaseInsensitive: true,
				},
			},
		}, nil
	}
	if m := attrExpr.FindStringSubmatch(word); m != nil {
		// attr:name:value — exact match on an arbitrary attribute.
		return &Constraint{
			Permanode: &PermanodeConstraint{
				Attr:       m[1],
				SkipHidden: true,
				Value:      m[2],
			},
		}, nil
	}
	if m := childrenOfExpr.FindStringSubmatch(word); m != nil {
		// childrenof:prefix — permanodes with a parent matching the blobref prefix.
		return &Constraint{
			Permanode: &PermanodeConstraint{
				Relation: &RelationConstraint{
					Relation: "parent",
					Any: &Constraint{
						BlobRefPrefix: m[1],
					},
				},
			},
		}, nil
	}
	if strings.HasPrefix(word, "before:") || strings.HasPrefix(word, "after:") {
		before := false
		when := ""
		if strings.HasPrefix(word, "before:") {
			before = true
			when = strings.TrimPrefix(word, "before:")
		} else {
			when = strings.TrimPrefix(word, "after:")
		}
		// Accept partial dates ("2013", "2013-02-05") by padding with the
		// tail of a zero RFC 3339 timestamp.
		base := "0000-01-01T00:00:00Z"
		if len(when) < len(base) {
			when += base[len(when):]
		}
		t, err := time.Parse(time.RFC3339, when)
		if err != nil {
			return nil, err
		}
		tc := &TimeConstraint{}
		if before {
			tc.Before = types.Time3339(t)
		} else {
			tc.After = types.Time3339(t)
		}
		return &Constraint{
			Permanode: &PermanodeConstraint{
				Time: tc,
			},
		}, nil
	}
	if strings.HasPrefix(word, "format:") {
		// format:jpg etc — match files by MIME type.
		return permOfFile(&FileConstraint{
			MIMEType: &StringConstraint{
				Equals: mimeFromFormat(strings.TrimPrefix(word, "format:")),
			},
		}), nil
	}
	// Was errors.New(fmt.Sprintf(...)) with a grammar slip ("an core-atom");
	// fmt.Errorf is the idiomatic equivalent.
	return nil, fmt.Errorf("Not a core-atom: %v", word)
}
示例12: populateFile
// populateFile indexes one parsed file schema blob: it streams the file's
// contents to compute the whole-file hash, extracts image metadata and
// audio tags where applicable, and records the resulting rows in bm.
// Transient fetch/read errors are logged and swallowed (returns nil) —
// see the TODOs below about retrying instead.
//
// blobref: of the file or schema blob
// blob: the parsed file schema blob
// bm: keys to populate
func (ix *Index) populateFile(b *schema.Blob, bm BatchMutation) error {
var times []time.Time // all creation or mod times seen; may be zero
times = append(times, b.ModTime())
blobRef := b.BlobRef()
seekFetcher := blob.SeekerFromStreamingFetcher(ix.BlobSource)
fr, err := b.NewFileReader(seekFetcher)
if err != nil {
// TODO(bradfitz): propagate up a transient failure
// error type, so we can retry indexing files in the
// future if blobs are only temporarily unavailable.
// Basically the same as the TODO just below.
log.Printf("index: error indexing file, creating NewFileReader %s: %v", blobRef, err)
return nil
}
defer fr.Close()
// Sniff the MIME type from the leading bytes; reader is then used for the
// full-content copy below.
mime, reader := magic.MIMETypeFromReader(fr)
sha1 := sha1.New()
var copyDest io.Writer = sha1
var imageBuf *keepFirstN // or nil
if strings.HasPrefix(mime, "image/") {
// Keep the first 256KiB of image data for metadata extraction below.
imageBuf = &keepFirstN{N: 256 << 10}
copyDest = io.MultiWriter(copyDest, imageBuf)
}
// Single pass: hash everything and, for images, tee the prefix into imageBuf.
size, err := io.Copy(copyDest, reader)
if err != nil {
// TODO: job scheduling system to retry this spaced
// out max n times. Right now our options are
// ignoring this error (forever) or returning the
// error and making the indexing try again (likely
// forever failing). Both options suck. For now just
// log and act like all's okay.
log.Printf("index: error indexing file %s: %v", blobRef, err)
return nil
}
if imageBuf != nil {
// Best-effort image dimensions; failures are silently skipped.
if conf, err := images.DecodeConfig(bytes.NewReader(imageBuf.Bytes)); err == nil {
bm.Set(keyImageSize.Key(blobRef), keyImageSize.Val(fmt.Sprint(conf.Width), fmt.Sprint(conf.Height)))
}
// An embedded (EXIF) file time, if present, joins the candidate times.
if ft, err := schema.FileTime(bytes.NewReader(imageBuf.Bytes)); err == nil {
log.Printf("filename %q exif = %v, %v", b.FileName(), ft, err)
times = append(times, ft)
} else {
log.Printf("filename %q exif = %v, %v", b.FileName(), ft, err)
}
}
// Drop zero times, sort the rest, and encode as "oldest" or
// "oldest,newest" in RFC 3339 form for the file-times row.
var sortTimes []time.Time
for _, t := range times {
if !t.IsZero() {
sortTimes = append(sortTimes, t)
}
}
sort.Sort(types.ByTime(sortTimes))
var time3339s string
switch {
case len(sortTimes) == 1:
time3339s = types.Time3339(sortTimes[0]).String()
case len(sortTimes) >= 2:
oldest, newest := sortTimes[0], sortTimes[len(sortTimes)-1]
time3339s = types.Time3339(oldest).String() + "," + types.Time3339(newest).String()
}
wholeRef := blob.RefFromHash(sha1)
bm.Set(keyWholeToFileRef.Key(wholeRef, blobRef), "1")
bm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(size, b.FileName(), mime))
bm.Set(keyFileTimes.Key(blobRef), keyFileTimes.Val(time3339s))
if strings.HasPrefix(mime, "audio/") {
// Parse audio tags from the (seekable) file reader; tag errors are
// logged but don't fail indexing.
tag, err := taglib.Decode(fr, fr.Size())
if err == nil {
indexMusic(tag, wholeRef, bm)
} else {
log.Print("index: error parsing tag: ", err)
}
}
return nil
}
示例13: parseExpression
//.........這裏部分代碼省略.........
},
})
continue
}
if strings.HasPrefix(word, "format:") {
andFile(&FileConstraint{
MIMEType: &StringConstraint{
Equals: mimeFromFormat(strings.TrimPrefix(word, "format:")),
},
})
continue
}
if strings.HasPrefix(word, "width:") {
m := whRangeExpr.FindStringSubmatch(strings.TrimPrefix(word, "width:"))
if m == nil {
return nil, errors.New("bogus width range")
}
andFile(&FileConstraint{
IsImage: true,
Width: whIntConstraint(m[1], m[2]),
})
continue
}
if strings.HasPrefix(word, "height:") {
m := whRangeExpr.FindStringSubmatch(strings.TrimPrefix(word, "height:"))
if m == nil {
return nil, errors.New("bogus height range")
}
andFile(&FileConstraint{
IsImage: true,
Height: whIntConstraint(m[1], m[2]),
})
continue
}
if strings.HasPrefix(word, "before:") || strings.HasPrefix(word, "after:") {
before := false
when := ""
if strings.HasPrefix(word, "before:") {
before = true
when = strings.TrimPrefix(word, "before:")
} else {
when = strings.TrimPrefix(word, "after:")
}
base := "0000-01-01T00:00:00Z"
if len(when) < len(base) {
when += base[len(when):]
}
t, err := time.Parse(time.RFC3339, when)
if err != nil {
return nil, err
}
tc := &TimeConstraint{}
if before {
tc.Before = types.Time3339(t)
} else {
tc.After = types.Time3339(t)
}
and(&Constraint{
Permanode: &PermanodeConstraint{
Time: tc,
},
})
continue
}
if strings.HasPrefix(word, "loc:") {
where := strings.TrimPrefix(word, "loc:")
rects, err := geocode.Lookup(ctx, where)
if err != nil {
return nil, err
}
if len(rects) == 0 {
return nil, fmt.Errorf("No location found for %q", where)
}
var locConstraint *Constraint
for i, rect := range rects {
rectConstraint := permOfFile(&FileConstraint{
IsImage: true,
Location: &LocationConstraint{
West: rect.SouthWest.Long,
East: rect.NorthEast.Long,
North: rect.NorthEast.Lat,
South: rect.SouthWest.Lat,
},
})
if i == 0 {
locConstraint = rectConstraint
} else {
locConstraint = orConst(locConstraint, rectConstraint)
}
}
and(locConstraint)
continue
}
log.Printf("Unknown search expression word %q", word)
// TODO: finish. better tokenization. non-operator tokens
// are text searches, etc.
}
return sq, nil
}
示例14: populateFile
// b: the parsed file schema blob
// mm: keys to populate
func (ix *Index) populateFile(b *schema.Blob, mm *mutationMap) (err error) {
var times []time.Time // all creation or mod times seen; may be zero
times = append(times, b.ModTime())
blobRef := b.BlobRef()
fetcher := &seekFetcherMissTracker{
// TODO(bradfitz): cache this SeekFetcher on ix so it
// it's have to be re-made each time? Probably small.
src: blob.SeekerFromStreamingFetcher(ix.BlobSource),
}
defer func() {
if err == nil {
return
}
fetcher.mu.Lock()
defer fetcher.mu.Unlock()
if len(fetcher.missing) == 0 {
return
}
// TODO(bradfitz): there was an error indexing this file, and
// we failed to load the blobs in f.missing. Add those as dependencies
// somewhere so when we get one of those missing blobs, we kick off
// a re-index of this file for whenever the indexer is idle.
}()
fr, err := b.NewFileReader(fetcher)
if err != nil {
// TODO(bradfitz): propagate up a transient failure
// error type, so we can retry indexing files in the
// future if blobs are only temporarily unavailable.
// Basically the same as the TODO just below.
//
// We'll also want to bump the schemaVersion after this,
// to fix anybody's index which is only partial due to
// this old bug where it would return nil instead of doing
// the necessary work.
log.Printf("index: error indexing file, creating NewFileReader %s: %v", blobRef, err)
return nil
}
defer fr.Close()
mime, reader := magic.MIMETypeFromReader(fr)
sha1 := sha1.New()
var copyDest io.Writer = sha1
var imageBuf *keepFirstN // or nil
if strings.HasPrefix(mime, "image/") {
// Emperically derived 1MiB assuming CR2 images require more than any
// other filetype we support:
// https://gist.github.com/wathiede/7982372
imageBuf = &keepFirstN{N: 1 << 20}
copyDest = io.MultiWriter(copyDest, imageBuf)
}
size, err := io.Copy(copyDest, reader)
if err != nil {
// TODO: job scheduling system to retry this spaced
// out max n times. Right now our options are
// ignoring this error (forever) or returning the
// error and making the indexing try again (likely
// forever failing). Both options suck. For now just
// log and act like all's okay.
//
// See TODOs above, and the fetcher.missing stuff.
log.Printf("index: error indexing file %s: %v", blobRef, err)
return nil
}
wholeRef := blob.RefFromHash(sha1)
if imageBuf != nil {
if conf, err := images.DecodeConfig(bytes.NewReader(imageBuf.Bytes)); err == nil {
mm.Set(keyImageSize.Key(blobRef), keyImageSize.Val(fmt.Sprint(conf.Width), fmt.Sprint(conf.Height)))
}
if ft, err := schema.FileTime(bytes.NewReader(imageBuf.Bytes)); err == nil {
log.Printf("filename %q exif = %v, %v", b.FileName(), ft, err)
times = append(times, ft)
} else {
log.Printf("filename %q exif = %v, %v", b.FileName(), ft, err)
}
indexEXIF(wholeRef, imageBuf.Bytes, mm)
}
var sortTimes []time.Time
for _, t := range times {
if !t.IsZero() {
sortTimes = append(sortTimes, t)
}
}
sort.Sort(types.ByTime(sortTimes))
var time3339s string
switch {
case len(sortTimes) == 1:
time3339s = types.Time3339(sortTimes[0]).String()
case len(sortTimes) >= 2:
oldest, newest := sortTimes[0], sortTimes[len(sortTimes)-1]
time3339s = types.Time3339(oldest).String() + "," + types.Time3339(newest).String()
}
mm.Set(keyWholeToFileRef.Key(wholeRef, blobRef), "1")
mm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(size, b.FileName(), mime))
//.........這裏部分代碼省略.........
示例15: populateFile
// populateFile indexes one parsed file schema blob: it streams the file's
// contents once to compute the whole-file hash, extracts image metadata
// (dimensions, EXIF, embedded file time) and audio tags where applicable,
// and records the resulting rows in mm.
//
// fetcher: used to load the file's data chunks
// b: the parsed file schema blob
// mm: keys to populate
func (ix *Index) populateFile(fetcher blob.Fetcher, b *schema.Blob, mm *mutationMap) (err error) {
var times []time.Time // all creation or mod times seen; may be zero
times = append(times, b.ModTime())
blobRef := b.BlobRef()
fr, err := b.NewFileReader(fetcher)
if err != nil {
return err
}
defer fr.Close()
// Sniff the MIME type from the leading bytes; mr is then used for the
// full-content copy below.
mime, mr := magic.MIMETypeFromReader(fr)
sha1 := sha1.New()
var copyDest io.Writer = sha1
var imageBuf *keepFirstN // or nil
if strings.HasPrefix(mime, "image/") {
// Keep the first 512KiB of image data for metadata extraction below.
imageBuf = &keepFirstN{N: 512 << 10}
copyDest = io.MultiWriter(copyDest, imageBuf)
}
// Single pass: hash everything and, for images, tee the prefix into imageBuf.
size, err := io.Copy(copyDest, mr)
if err != nil {
return err
}
wholeRef := blob.RefFromHash(sha1)
if imageBuf != nil {
// Each extractor below first tries the buffered prefix and falls back
// to re-reading the whole file via readPrefixOrFile.
var conf images.Config
decodeConfig := func(r filePrefixReader) error {
conf, err = images.DecodeConfig(r)
return err
}
if err := readPrefixOrFile(imageBuf.Bytes, fetcher, b, decodeConfig); err == nil {
mm.Set(keyImageSize.Key(blobRef), keyImageSize.Val(fmt.Sprint(conf.Width), fmt.Sprint(conf.Height)))
}
// An embedded (EXIF) file time, if present, joins the candidate times.
var ft time.Time
fileTime := func(r filePrefixReader) error {
ft, err = schema.FileTime(r)
return err
}
if err = readPrefixOrFile(imageBuf.Bytes, fetcher, b, fileTime); err == nil {
times = append(times, ft)
}
if exifDebug {
log.Printf("filename %q exif = %v, %v", b.FileName(), ft, err)
}
// TODO(mpl): find (generate?) more broken EXIF images to experiment with.
indexEXIFData := func(r filePrefixReader) error {
return indexEXIF(wholeRef, r, mm)
}
if err = readPrefixOrFile(imageBuf.Bytes, fetcher, b, indexEXIFData); err != nil {
if exifDebug {
log.Printf("error parsing EXIF: %v", err)
}
}
}
// Drop zero times, sort the rest, and encode as "oldest" or
// "oldest,newest" in RFC 3339 form for the file-times row.
var sortTimes []time.Time
for _, t := range times {
if !t.IsZero() {
sortTimes = append(sortTimes, t)
}
}
sort.Sort(types.ByTime(sortTimes))
var time3339s string
switch {
case len(sortTimes) == 1:
time3339s = types.Time3339(sortTimes[0]).String()
case len(sortTimes) >= 2:
oldest, newest := sortTimes[0], sortTimes[len(sortTimes)-1]
time3339s = types.Time3339(oldest).String() + "," + types.Time3339(newest).String()
}
mm.Set(keyWholeToFileRef.Key(wholeRef, blobRef), "1")
mm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(size, b.FileName(), mime, wholeRef))
mm.Set(keyFileTimes.Key(blobRef), keyFileTimes.Val(time3339s))
if strings.HasPrefix(mime, "audio/") {
// Re-read the file from the start for tag parsing; fr is seekable here.
indexMusic(io.NewSectionReader(fr, 0, fr.Size()), wholeRef, mm)
}
return nil
}