This page collects typical usage examples of the Go type schema.Superset from camlistore.org/pkg/schema. If you are unsure what schema.Superset does or how to use it, the curated examples below may help.
Six code examples of the Superset type are shown, ordered by popularity by default.
Example 1: fetchSchemaSuperset
// Errors returned are:
//   os.ErrNotExist -- blob not found
//   os.ErrInvalid  -- not JSON or a camli schema blob
func (fs *CamliFileSystem) fetchSchemaSuperset(br *blobref.BlobRef) (*schema.Superset, error) {
	blobStr := br.String()
	if ss, ok := fs.blobToSchema.Get(blobStr); ok {
		return ss.(*schema.Superset), nil
	}

	rsc, _, err := fs.fetcher.Fetch(br)
	if err != nil {
		return nil, err
	}
	defer rsc.Close()
	jd := json.NewDecoder(rsc)
	ss := new(schema.Superset)
	err = jd.Decode(ss)
	if err != nil {
		log.Printf("Error parsing %s as schema blob: %v", br, err)
		return nil, os.ErrInvalid
	}
	if ss.Type == "" {
		log.Printf("blob %s is JSON but lacks camliType", br)
		return nil, os.ErrInvalid
	}
	ss.BlobRef = br
	fs.blobToSchema.Add(blobStr, ss)
	return ss, nil
}
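A minimal call-site sketch follows, assuming an already-constructed *CamliFileSystem value (its constructor is not shown on this page) and the old camlistore.org/pkg/blobref API in which blobref.Parse returns nil for an invalid ref string. The helper name and the way the ref arrives are placeholders for illustration, not part of the original code.

// Hypothetical helper, not part of the package: it only shows how a caller
// might tell the two documented error values apart.
func describeSchemaBlob(fs *CamliFileSystem, ref string) {
	br := blobref.Parse(ref) // assumed to return nil for an invalid ref string
	if br == nil {
		log.Printf("invalid blobref %q", ref)
		return
	}
	ss, err := fs.fetchSchemaSuperset(br)
	switch {
	case err == os.ErrNotExist:
		log.Printf("blob %s not found", br)
	case err == os.ErrInvalid:
		log.Printf("blob %s is not a camli schema blob", br)
	case err != nil:
		log.Printf("error fetching %s: %v", br, err)
	default:
		log.Printf("blob %s has camliType %q", br, ss.Type)
	}
}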
Example 2: populateFile
// blobref: of the file or schema blob
//      ss: the parsed file schema blob
//      bm: keys to populate
func (ix *Index) populateFile(blobRef *blobref.BlobRef, ss *schema.Superset, bm BatchMutation) error {
	seekFetcher, err := blobref.SeekerFromStreamingFetcher(ix.BlobSource)
	if err != nil {
		return err
	}

	sha1 := sha1.New()
	fr, err := ss.NewFileReader(seekFetcher)
	if err != nil {
		// TODO(bradfitz): propagate up a transient failure
		// error type, so we can retry indexing files in the
		// future if blobs are only temporarily unavailable.
		// Basically the same as the TODO just below.
		log.Printf("index: error indexing file, creating NewFileReader %s: %v", blobRef, err)
		return nil
	}
	mime, reader := magic.MimeTypeFromReader(fr)

	size, err := io.Copy(sha1, reader)
	if err != nil {
		// TODO: job scheduling system to retry this spaced
		// out max n times. Right now our options are
		// ignoring this error (forever) or returning the
		// error and making the indexing try again (likely
		// forever failing). Both options suck. For now just
		// log and act like all's okay.
		log.Printf("index: error indexing file %s: %v", blobRef, err)
		return nil
	}

	wholeRef := blobref.FromHash("sha1", sha1)
	bm.Set(keyWholeToFileRef.Key(wholeRef, blobRef), "1")
	bm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(size, ss.FileName, mime))
	return nil
}
Example 3: populateFile
// blobref: of the file or schema blob
//      ss: the parsed file schema blob
//      bm: keys to populate
func (ix *Index) populateFile(blobRef *blobref.BlobRef, ss *schema.Superset, bm BatchMutation) error {
	seekFetcher := blobref.SeekerFromStreamingFetcher(ix.BlobSource)
	fr, err := ss.NewFileReader(seekFetcher)
	if err != nil {
		// TODO(bradfitz): propagate up a transient failure
		// error type, so we can retry indexing files in the
		// future if blobs are only temporarily unavailable.
		// Basically the same as the TODO just below.
		log.Printf("index: error indexing file, creating NewFileReader %s: %v", blobRef, err)
		return nil
	}
	defer fr.Close()
	mime, reader := magic.MimeTypeFromReader(fr)

	sha1 := sha1.New()
	var copyDest io.Writer = sha1
	var withCopyErr func(error) // or nil
	if strings.HasPrefix(mime, "image/") {
		pr, pw := io.Pipe()
		copyDest = io.MultiWriter(copyDest, pw)
		confc := make(chan *image.Config, 1)
		go func() {
			conf, _, err := image.DecodeConfig(pr)
			defer io.Copy(ioutil.Discard, pr)
			if err == nil {
				confc <- &conf
			} else {
				confc <- nil
			}
		}()
		withCopyErr = func(err error) {
			pw.CloseWithError(err)
			if conf := <-confc; conf != nil {
				bm.Set(keyImageSize.Key(blobRef), keyImageSize.Val(fmt.Sprint(conf.Width), fmt.Sprint(conf.Height)))
			}
		}
	}

	size, err := io.Copy(copyDest, reader)
	if f := withCopyErr; f != nil {
		f(err)
	}
	if err != nil {
		// TODO: job scheduling system to retry this spaced
		// out max n times. Right now our options are
		// ignoring this error (forever) or returning the
		// error and making the indexing try again (likely
		// forever failing). Both options suck. For now just
		// log and act like all's okay.
		log.Printf("index: error indexing file %s: %v", blobRef, err)
		return nil
	}

	wholeRef := blobref.FromHash("sha1", sha1)
	bm.Set(keyWholeToFileRef.Key(wholeRef, blobRef), "1")
	bm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(size, ss.FileName, mime))
	return nil
}
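The image branch above hashes the stream while a concurrent goroutine decodes the image dimensions from the same bytes through an io.Pipe, so the file is read only once. Below is a self-contained sketch of that pattern in isolation, using only the standard library; the file name is a placeholder and the error handling is simplified for illustration.

package main

import (
	"crypto/sha1"
	"fmt"
	"image"
	_ "image/jpeg" // register decoders so image.DecodeConfig can recognize them
	_ "image/png"
	"io"
	"io/ioutil"
	"log"
	"os"
)

func main() {
	f, err := os.Open("example.jpg") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	h := sha1.New()
	pr, pw := io.Pipe()
	confc := make(chan *image.Config, 1)
	go func() {
		conf, _, err := image.DecodeConfig(pr)
		defer io.Copy(ioutil.Discard, pr) // drain the pipe so the writer never blocks
		if err == nil {
			confc <- &conf
		} else {
			confc <- nil
		}
	}()

	// One pass over the file feeds both the hash and the image decoder.
	size, err := io.Copy(io.MultiWriter(h, pw), f)
	pw.CloseWithError(err)
	if conf := <-confc; conf != nil {
		fmt.Printf("image dimensions: %dx%d\n", conf.Width, conf.Height)
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d bytes, sha1 %x\n", size, h.Sum(nil))
}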
Example 4: setFileMeta
func setFileMeta(name string, sc *schema.Superset) error {
	if err := os.Chmod(name, sc.FileMode()); err != nil {
		return err
	}
	if err := os.Chown(name, sc.UnixOwnerId, sc.UnixGroupId); err != nil {
		return err
	}
	t, err := time.Parse(time.RFC3339, sc.UnixMtime)
	if err != nil {
		// No usable unixMtime in the schema blob; leave the file's times alone.
		return nil
	}
	return os.Chtimes(name, t, t)
}
Example 5: setFileMeta
func setFileMeta(name string, sc *schema.Superset) error {
	err1 := os.Chmod(name, sc.FileMode())
	var err2 error
	if mt := sc.ModTime(); !mt.IsZero() {
		err2 = os.Chtimes(name, mt, mt)
	}
	err3 := os.Chown(name, sc.UnixOwnerId, sc.UnixGroupId)
	// Return first non-nil error for logging.
	for _, err := range []error{err1, err2, err3} {
		if err != nil {
			return err
		}
	}
	return nil
}
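A hedged call-site sketch: in both variants, setFileMeta is presumably invoked after the file's contents have been written to disk, so the chmod/chown/chtimes apply to the finished file. The wrapper below is illustrative only; its name and the way the contents arrive are assumptions, not part of the original code.

// Hypothetical wrapper for illustration: write the bytes, then apply the
// schema blob's mode, owner, and modification time via setFileMeta.
func writeFileWithMeta(name string, contents []byte, sc *schema.Superset) error {
	if err := ioutil.WriteFile(name, contents, 0644); err != nil {
		return err
	}
	return setFileMeta(name, sc)
}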
Example 6: populateDir
// blobref: of the file or schema blob
//      ss: the parsed file schema blob
//      bm: keys to populate
func (ix *Index) populateDir(blobRef *blobref.BlobRef, ss *schema.Superset, bm BatchMutation) error {
	seekFetcher := blobref.SeekerFromStreamingFetcher(ix.BlobSource)
	dr, err := ss.NewDirReader(seekFetcher)
	if err != nil {
		// TODO(bradfitz): propagate up a transient failure
		// error type, so we can retry indexing files in the
		// future if blobs are only temporarily unavailable.
		log.Printf("index: error indexing directory, creating NewDirReader %s: %v", blobRef, err)
		return nil
	}
	sts, err := dr.StaticSet()
	if err != nil {
		log.Printf("index: error indexing directory: can't get StaticSet: %v\n", err)
		return nil
	}

	bm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(len(sts), ss.FileName, ""))
	return nil
}
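Taken together, Examples 2, 3, and 6 suggest how an indexer dispatches a parsed schema blob by its camliType. The sketch below is not the package's actual entry point; the method name indexSchemaBlob is invented for illustration, and other camliTypes are simply passed over.

// Hypothetical dispatcher, assuming the populateFile/populateDir helpers above.
func (ix *Index) indexSchemaBlob(blobRef *blobref.BlobRef, ss *schema.Superset, bm BatchMutation) error {
	switch ss.Type {
	case "file":
		return ix.populateFile(blobRef, ss, bm)
	case "directory":
		return ix.populateDir(blobRef, ss, bm)
	default:
		// Permanodes, claims, and other camliTypes are handled elsewhere.
		return nil
	}
}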