

Golang blobserver.ReceiveNoHash Function Code Examples

This article collects typical usage examples of the ReceiveNoHash function from the Go package camlistore.org/pkg/blobserver. If you are wondering what ReceiveNoHash does, how to call it, or what it looks like in real code, the curated examples below should help.


Nine code examples of ReceiveNoHash are shown below, sorted by popularity by default.
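Before diving in, a quick note on what ReceiveNoHash is for. Camlistore's blobserver package exposes two receive helpers: blobserver.Receive hashes the incoming bytes and rejects them if the digest does not match the given blob.Ref, while blobserver.ReceiveNoHash skips that verification for callers that have already established the digest themselves, typically by hashing during an earlier io.Copy. The sketch below is illustrative rather than taken from the project; storeVerified is a hypothetical helper, and the signature of ReceiveNoHash is inferred from how the examples on this page call it.

package main

import (
	"bytes"

	"camlistore.org/pkg/blob"
	"camlistore.org/pkg/blobserver"
)

// storeVerified (hypothetical) uploads data whose digest the caller
// computes itself. Because br is derived from the exact bytes being
// uploaded, re-hashing inside the receive path would be redundant,
// so ReceiveNoHash is used instead of Receive.
func storeVerified(dst blobserver.BlobReceiver, data []byte) (blob.SizedRef, error) {
	br := blob.SHA1FromBytes(data) // hash once, up front...
	// ...so the receive path does not have to hash a second time.
	return blobserver.ReceiveNoHash(dst, br, bytes.NewReader(data))
}

Every example that follows applies the same pattern: buffer the blob once, verifying its size or digest in the process, then hand the already-trusted bytes to ReceiveNoHash so no second hash pass is paid for.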

Example 1: ReceiveBlob

func (s *Storage) ReceiveBlob(br blob.Ref, source io.Reader) (blob.SizedRef, error) {
	var buf bytes.Buffer
	size, err := io.Copy(&buf, source)
	if err != nil {
		return blob.SizedRef{}, err
	}

	b := bytes.NewReader(buf.Bytes())
	fi, err := s.b.Upload(b, s.dirPrefix+br.String(), "")
	if err != nil {
		return blob.SizedRef{}, err
	}

	if int64(fi.ContentLength) != size {
		return blob.SizedRef{}, fmt.Errorf("b2: expected ContentLength %d, got %d", size, fi.ContentLength)
	}
	if br.HashName() == "sha1" && fi.ContentSHA1 != br.Digest() {
		return blob.SizedRef{}, fmt.Errorf("b2: expected ContentSHA1 %s, got %s", br.Digest(), fi.ContentSHA1)
	}

	if s.cache != nil {
		// NoHash because it's already verified if we read it without
		// errors from the source, and uploaded it without mismatch.
		blobserver.ReceiveNoHash(s.cache, br, &buf)
	}
	return blob.SizedRef{Ref: br, Size: uint32(size)}, nil
}
Developer: camlistore, Project: camlistore, Lines: 27, Source file: b2.go

Example 2: ReceiveBlob

func (sto *replicaStorage) ReceiveBlob(br blob.Ref, src io.Reader) (_ blob.SizedRef, err error) {
	// Slurp the whole blob before replicating. Bounded by 16 MB anyway.
	var buf bytes.Buffer
	size, err := io.Copy(&buf, src)
	if err != nil {
		return
	}

	nReplicas := len(sto.replicas)
	resc := make(chan sizedBlobAndError, nReplicas)
	uploadToReplica := func(idx int, dst blobserver.BlobReceiver) {
		// Using ReceiveNoHash because it's already been
		// verified implicitly by the io.Copy above:
		sb, err := blobserver.ReceiveNoHash(dst, br, bytes.NewReader(buf.Bytes()))
		resc <- sizedBlobAndError{idx, sb, err}
	}
	for idx, replica := range sto.replicas {
		go uploadToReplica(idx, replica)
	}

	nSuccess := 0
	var fails []sizedBlobAndError
	for range sto.replicas {
		res := <-resc
		switch {
		case res.err == nil && int64(res.sb.Size) == size:
			nSuccess++
			if nSuccess == sto.minWritesForSuccess {
				return res.sb, nil
			}
		case res.err == nil:
			err = fmt.Errorf("replica: upload shard reported size %d, expected %d", res.sb.Size, size)
			res.err = err
			fails = append(fails, res)
		default:
			err = res.err
			fails = append(fails, res)
		}
	}
	for _, res := range fails {
		log.Printf("replica: receiving blob %v, %d successes, %d failures; backend %s reported: %v",
			br,
			nSuccess, len(fails),
			sto.replicaPrefixes[res.idx], res.err)
	}
	return
}
Developer: pombredanne, Project: camlistore, Lines: 47, Source file: replica.go

Example 3: uploadString

func uploadString(bs blobserver.StatReceiver, br blob.Ref, s string) (blob.Ref, error) {
	if !br.Valid() {
		panic("invalid blobref")
	}
	hasIt, err := serverHasBlob(bs, br)
	if err != nil {
		return blob.Ref{}, err
	}
	if hasIt {
		return br, nil
	}
	_, err = blobserver.ReceiveNoHash(bs, br, strings.NewReader(s))
	if err != nil {
		return blob.Ref{}, err
	}
	return br, nil
}
Developer: kdevroede, Project: camlistore, Lines: 17, Source file: filewriter.go

Example 4: ReceiveBlob

func (s *Storage) ReceiveBlob(br blob.Ref, source io.Reader) (blob.SizedRef, error) {
	var buf bytes.Buffer
	size, err := io.Copy(&buf, source)
	if err != nil {
		return blob.SizedRef{}, err
	}

	err = s.client.PutObject(
		&googlestorage.Object{Bucket: s.bucket, Key: s.dirPrefix + br.String()},
		ioutil.NopCloser(bytes.NewReader(buf.Bytes())))
	if err != nil {
		return blob.SizedRef{}, err
	}
	if s.cache != nil {
		// NoHash because it's already verified if we read it
		// without errors on the io.Copy above.
		blobserver.ReceiveNoHash(s.cache, br, bytes.NewReader(buf.Bytes()))
	}
	return blob.SizedRef{Ref: br, Size: uint32(size)}, nil
}
Developer: sfrdmn, Project: camlistore, Lines: 20, Source file: storage.go

Example 5: ReceiveBlob

func (s *storage) ReceiveBlob(plainBR blob.Ref, source io.Reader) (sb blob.SizedRef, err error) {
	iv := s.randIV()
	stream := cipher.NewCTR(s.block, iv)

	hash := plainBR.Hash()
	var buf bytes.Buffer
	// TODO: compress before encrypting?
	buf.Write(iv) // TODO: write more structured header w/ version & IV length? or does that weaken it?
	sw := cipher.StreamWriter{S: stream, W: &buf}
	plainSize, err := io.Copy(io.MultiWriter(sw, hash), source)
	if err != nil {
		return sb, err
	}
	if !plainBR.HashMatches(hash) {
		return sb, blobserver.ErrCorruptBlob
	}

	encBR := blob.SHA1FromBytes(buf.Bytes())
	_, err = blobserver.Receive(s.blobs, encBR, bytes.NewReader(buf.Bytes()))
	if err != nil {
		log.Printf("encrypt: error writing encrypted blob %v (plaintext %v): %v", encBR, plainBR, err)
		return sb, errors.New("encrypt: error writing encrypted blob")
	}

	meta := encodeMetaValue(uint32(plainSize), iv, encBR, buf.Len())
	metaBlob := s.makeSingleMetaBlob(plainBR, meta)
	_, err = blobserver.ReceiveNoHash(s.meta, blob.SHA1FromBytes(metaBlob), bytes.NewReader(metaBlob))
	if err != nil {
		log.Printf("encrypt: error writing encrypted meta for plaintext %v (encrypted blob %v): %v", plainBR, encBR, err)
		return sb, errors.New("encrypt: error writing encrypted meta")
	}

	err = s.index.Set(plainBR.String(), meta)
	if err != nil {
		return sb, fmt.Errorf("encrypt: error updating index for encrypted %v (plaintext %v): %v", encBR, plainBR, err)
	}

	return blob.SizedRef{Ref: plainBR, Size: uint32(plainSize)}, nil
}
Developer: camarox53, Project: coreos-baremetal, Lines: 39, Source file: encrypt.go

Example 6: ReceiveBlob

func (s *Storage) ReceiveBlob(br blob.Ref, source io.Reader) (blob.SizedRef, error) {
	var buf bytes.Buffer
	size, err := io.Copy(&buf, source)
	if err != nil {
		return blob.SizedRef{}, err
	}

	// TODO(mpl): use context from caller, once one is available (issue 733)
	w := s.client.Bucket(s.bucket).Object(s.dirPrefix + br.String()).NewWriter(context.TODO())
	if _, err := io.Copy(w, ioutil.NopCloser(bytes.NewReader(buf.Bytes()))); err != nil {
		return blob.SizedRef{}, err
	}
	if err := w.Close(); err != nil {
		return blob.SizedRef{}, err
	}

	if s.cache != nil {
		// NoHash because it's already verified if we read it
		// without errors on the io.Copy above.
		blobserver.ReceiveNoHash(s.cache, br, bytes.NewReader(buf.Bytes()))
	}
	return blob.SizedRef{Ref: br, Size: uint32(size)}, nil
}
Developer: pombredanne, Project: camlistore, Lines: 23, Source file: storage.go

Example 7: ReceiveBlob

func (sto *s3Storage) ReceiveBlob(b blob.Ref, source io.Reader) (sr blob.SizedRef, err error) {
	var buf bytes.Buffer
	md5h := md5.New()

	size, err := io.Copy(io.MultiWriter(&buf, md5h), source)
	if err != nil {
		return sr, err
	}

	if faultReceive.FailErr(&err) {
		return
	}

	err = sto.s3Client.PutObject(sto.dirPrefix+b.String(), sto.bucket, md5h, size, &buf)
	if err != nil {
		return sr, err
	}
	if sto.cache != nil {
		// NoHash because it's already verified if we read it
		// without errors on the io.Copy above.
		blobserver.ReceiveNoHash(sto.cache, b, bytes.NewReader(buf.Bytes()))
	}
	return blob.SizedRef{Ref: b, Size: uint32(size)}, nil
}
Developer: rfistman, Project: camlistore, Lines: 24, Source file: receive.go

Example 8: vivify

// vivify verifies that all the chunks for the file described by fileblob are on the blobserver.
// It makes a planned permanode, signs it, and uploads it. It finally makes a camliContent claim
// on that permanode for fileblob, signs it, and uploads it to the blobserver.
func vivify(blobReceiver blobserver.BlobReceiveConfiger, fileblob blob.SizedRef) error {
	sf, ok := blobReceiver.(blob.StreamingFetcher)
	if !ok {
		return fmt.Errorf("BlobReceiver is not a StreamingFetcher")
	}
	fetcher := blob.SeekerFromStreamingFetcher(sf)
	fr, err := schema.NewFileReader(fetcher, fileblob.Ref)
	if err != nil {
		return fmt.Errorf("Filereader error for blobref %v: %v", fileblob.Ref.String(), err)
	}
	defer fr.Close()

	h := sha1.New()
	n, err := io.Copy(h, fr)
	if err != nil {
		return fmt.Errorf("Could not read all file of blobref %v: %v", fileblob.Ref.String(), err)
	}
	if n != fr.Size() {
		return fmt.Errorf("Could not read all file of blobref %v. Wanted %v, got %v", fileblob.Ref.String(), fr.Size(), n)
	}

	config := blobReceiver.Config()
	if config == nil {
		return errors.New("blobReceiver has no config")
	}
	hf := config.HandlerFinder
	if hf == nil {
		return errors.New("blobReceiver config has no HandlerFinder")
	}
	JSONSignRoot, sh, err := hf.FindHandlerByType("jsonsign")
	if err != nil || sh == nil {
		return errors.New("jsonsign handler not found")
	}
	sigHelper, ok := sh.(*signhandler.Handler)
	if !ok {
		return errors.New("handler is not a JSON signhandler")
	}
	discoMap := sigHelper.DiscoveryMap(JSONSignRoot)
	publicKeyBlobRef, ok := discoMap["publicKeyBlobRef"].(string)
	if !ok {
		return fmt.Errorf("Discovery: json decoding error: %v", err)
	}

	// The file schema must have a modtime to vivify, as the modtime is used for all three of:
	// 1) the permanode's signature
	// 2) the camliContent attribute claim's "claimDate"
	// 3) the signature time of 2)
	claimDate, err := time.Parse(time.RFC3339, fr.FileSchema().UnixMtime)
	if err != nil {
		return fmt.Errorf("While parsing modtime for file %v: %v", fr.FileSchema().FileName, err)
	}

	permanodeBB := schema.NewHashPlannedPermanode(h)
	permanodeBB.SetSigner(blob.MustParse(publicKeyBlobRef))
	permanodeBB.SetClaimDate(claimDate)
	permanodeSigned, err := sigHelper.Sign(permanodeBB)
	if err != nil {
		return fmt.Errorf("Signing permanode %v: %v", permanodeSigned, err)
	}
	permanodeRef := blob.SHA1FromString(permanodeSigned)
	_, err = blobserver.ReceiveNoHash(blobReceiver, permanodeRef, strings.NewReader(permanodeSigned))
	if err != nil {
		return fmt.Errorf("While uploading signed permanode %v, %v: %v", permanodeRef, permanodeSigned, err)
	}

	contentClaimBB := schema.NewSetAttributeClaim(permanodeRef, "camliContent", fileblob.Ref.String())
	contentClaimBB.SetSigner(blob.MustParse(publicKeyBlobRef))
	contentClaimBB.SetClaimDate(claimDate)
	contentClaimSigned, err := sigHelper.Sign(contentClaimBB)
	if err != nil {
		return fmt.Errorf("Signing camliContent claim: %v", err)
	}
	contentClaimRef := blob.SHA1FromString(contentClaimSigned)
	_, err = blobserver.ReceiveNoHash(blobReceiver, contentClaimRef, strings.NewReader(contentClaimSigned))
	if err != nil {
		return fmt.Errorf("While uploading signed camliContent claim %v, %v: %v", contentClaimRef, contentClaimSigned, err)
	}
	return nil
}
Developer: rakyll, Project: camlistore, Lines: 82, Source file: upload.go

Example 9: writeAZip


//.........(part of the code omitted here).........
	}
	mf.DataBlobsOrigin = blob.RefFromHash(chunkWholeHash)

	// zipBlobs is where a schema or data blob is relative to the beginning
	// of the zip file.
	var zipBlobs []BlobAndPos

	var dataOffset int64
	for _, br := range dataRefsWritten {
		size := pk.dataSize[br]
		mf.DataBlobs = append(mf.DataBlobs, BlobAndPos{blob.SizedRef{Ref: br, Size: size}, dataOffset})

		zipBlobs = append(zipBlobs, BlobAndPos{blob.SizedRef{Ref: br, Size: size}, dataStart + dataOffset})
		dataOffset += int64(size)
	}

	for _, br := range schemaBlobs {
		fw, err := zw.CreateHeader(&zip.FileHeader{
			Name:   "camlistore/" + br.String() + ".json",
			Method: zip.Store, // uncompressed
		})
		check(err)
		check(zw.Flush())
		b := pk.schemaBlob[br]
		zipBlobs = append(zipBlobs, BlobAndPos{blob.SizedRef{Ref: br, Size: b.Size()}, cw.n})
		rc := b.Open()
		n, err := io.Copy(fw, rc)
		rc.Close()
		check(err)
		if n != int64(b.Size()) {
			return fmt.Errorf("failed to write all of schema blob %v: %d bytes, not wanted %d", br, n, b.Size())
		}
	}

	// Manifest file
	fw, err = zw.Create(zipManifestPath)
	check(err)
	enc, err := json.MarshalIndent(mf, "", "  ")
	check(err)
	_, err = fw.Write(enc)
	check(err)
	err = zw.Close()
	check(err)

	if zbuf.Len() > zipMax {
		// We guessed wrong. Back up. Find out how many blobs we went over.
		overage := zbuf.Len() - zipMax
		for i := len(dataRefsWritten) - 1; i >= 0; i-- {
			dr := dataRefsWritten[i]
			if overage <= 0 {
				return needsTruncatedAfterError{dr}
			}
			overage -= int(pk.dataSize[dr])
		}
		return errors.New("file is unpackable; first blob is too big to fit")
	}

	zipRef := blob.SHA1FromBytes(zbuf.Bytes())
	zipSB, err := blobserver.ReceiveNoHash(pk.s.large, zipRef, bytes.NewReader(zbuf.Bytes()))
	if err != nil {
		return err
	}

	bm := pk.s.meta.BeginBatch()
	bm.Set(fmt.Sprintf("%s%s:%d", wholeMetaPrefix, pk.wholeRef, len(pk.zips)),
		fmt.Sprintf("%s %d %d %d",
			zipRef,
			dataStart,
			pk.wholeBytesWritten,
			dataBytesWritten))

	pk.wholeBytesWritten += dataBytesWritten
	pk.zips = append(pk.zips, writtenZip{
		SizedRef: zipSB,
		dataRefs: dataRefsWritten,
	})

	for _, zb := range zipBlobs {
		bm.Set(blobMetaPrefix+zb.Ref.String(), fmt.Sprintf("%d %v %d", zb.Size, zipRef, zb.Offset))
	}
	if err := pk.s.meta.CommitBatch(bm); err != nil {
		return err
	}

	// Delete from small
	if !pk.s.skipDelete {
		toDelete := make([]blob.Ref, 0, len(dataRefsWritten)+len(schemaBlobs))
		toDelete = append(toDelete, dataRefsWritten...)
		toDelete = append(toDelete, schemaBlobs...)
		if err := pk.s.small.RemoveBlobs(toDelete); err != nil {
			// Can't really do anything about it and doesn't really matter, so
			// just log for now.
			pk.s.Logf("Error removing blobs from %s: %v", pk.s.small, err)
		}
	}

	// On success, consume the chunks we wrote from pk.chunksRemain.
	pk.chunksRemain = pk.chunksRemain[len(dataRefsWritten):]
	return nil
}
Developer: camlistore, Project: camlistore, Lines: 101, Source file: blobpacked.go


Note: the camlistore.org/pkg/blobserver.ReceiveNoHash examples in this article were compiled from open-source projects hosted on GitHub and similar code and documentation platforms. The code snippets come from projects contributed by their respective authors, and copyright remains with them; consult the corresponding project's license before using or redistributing this code. Do not republish without permission.