This article collects typical usage examples of the Golang EnumerateAll function from camlistore.org/pkg/blobserver. If you are wondering what exactly EnumerateAll does, or how to call it in your own code, the curated examples below should help.
Fifteen code examples of the EnumerateAll function are shown below, sorted by popularity by default.
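Before the examples, here is a minimal sketch of the basic calling pattern. It assumes the Camlistore-era signature in which EnumerateAll takes a *context.Context (from camlistore.org/pkg/context) as its first argument, as in Examples 1, 3, and 8 below; older snippets (Examples 4 and 10) use a variant without the context parameter, and newer ones use the standard context package. The helper name collectAllRefs is illustrative, not part of the library.

package example

import (
	"camlistore.org/pkg/blob"
	"camlistore.org/pkg/blobserver"
	"camlistore.org/pkg/context"
)

// collectAllRefs gathers every blob ref in src. EnumerateAll pages
// through the storage's EnumerateBlobs implementation in batches;
// returning a non-nil error from the callback stops the enumeration.
func collectAllRefs(ctx *context.Context, src blobserver.BlobEnumerator) ([]blob.Ref, error) {
	var refs []blob.Ref
	err := blobserver.EnumerateAll(ctx, src, func(sb blob.SizedRef) error {
		refs = append(refs, sb.Ref)
		return nil
	})
	return refs, err
}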
Example 1: TestS3
func TestS3(t *testing.T) {
if *bucket == "" || *key == "" || *secret == "" {
t.Skip("Skipping test because at least one of the -s3_key, -s3_secret, or -s3_bucket flags has not been provided.")
}
if !strings.HasPrefix(*bucket, "camlistore-") || !strings.HasSuffix(*bucket, "-test") {
t.Fatalf("bogus bucket name %q; must begin with 'camlistore-' and end in '-test'", *bucket)
}
storagetest.Test(t, func(t *testing.T) (sto blobserver.Storage, cleanup func()) {
sto, err := newFromConfig(nil, jsonconfig.Obj{
"aws_access_key": *key,
"aws_secret_access_key": *secret,
"bucket": *bucket,
})
if err != nil {
t.Fatalf("newFromConfig error: %v", err)
}
if !testing.Short() {
log.Printf("Warning: this test does many serial operations. Without the go test -short flag, this test will be very slow.")
}
clearBucket := func() {
var all []blob.Ref
if err := blobserver.EnumerateAll(context.New(), sto, func(sb blob.SizedRef) error {
	t.Logf("Deleting: %v", sb.Ref)
	all = append(all, sb.Ref)
	return nil
}); err != nil {
	t.Fatalf("EnumerateAll error during cleanup: %v", err)
}
if err := sto.RemoveBlobs(all); err != nil {
t.Fatalf("Error removing blobs during cleanup: %v", err)
}
}
clearBucket()
return sto, clearBucket
})
}
Example 2: TestIsolation
func TestIsolation(t *testing.T) {
ld := test.NewLoader()
master, _ := ld.GetStorage("/good-storage/")
ns1 := newNamespace(t, ld)
ns2 := newNamespace(t, ld)
stoMap := map[string]blobserver.Storage{
"ns1": ns1,
"ns2": ns2,
"master": master,
}
want := func(src string, want ...blob.Ref) {
if _, ok := stoMap[src]; !ok {
t.Fatalf("undefined storage %q", src)
}
sort.Sort(blob.ByRef(want))
var got []blob.Ref
if err := blobserver.EnumerateAll(context.TODO(), stoMap[src], func(sb blob.SizedRef) error {
got = append(got, sb.Ref)
return nil
}); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("server %q = %q; want %q", src, got, want)
}
}
b1 := &test.Blob{Contents: "Blob 1"}
b1r := b1.BlobRef()
b2 := &test.Blob{Contents: "Blob 2"}
b2r := b2.BlobRef()
b3 := &test.Blob{Contents: "Shared Blob"}
b3r := b3.BlobRef()
b1.MustUpload(t, ns1)
want("ns1", b1r)
want("ns2")
want("master", b1r)
b2.MustUpload(t, ns2)
want("ns1", b1r)
want("ns2", b2r)
want("master", b1r, b2r)
b3.MustUpload(t, ns2)
want("ns1", b1r)
want("ns2", b2r, b3r)
want("master", b1r, b2r, b3r)
b3.MustUpload(t, ns1)
want("ns1", b1r, b3r)
want("ns2", b2r, b3r)
want("master", b1r, b2r, b3r)
if _, _, err := ns2.FetchStreaming(b1r); err == nil {
t.Errorf("b1 shouldn't be accessible via ns2")
}
}
Example 3: blobserverEnumerator
func blobserverEnumerator(ctx *context.Context, src blobserver.BlobEnumerator) func(chan<- blob.SizedRef, <-chan struct{}) error {
return func(dst chan<- blob.SizedRef, intr <-chan struct{}) error {
return blobserver.EnumerateAll(ctx, src, func(sb blob.SizedRef) error {
select {
case dst <- sb:
case <-intr:
return errors.New("interrupted")
}
return nil
})
}
}
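For illustration, a hedged sketch of how the function returned by blobserverEnumerator might be driven; drainEnumerator and its channel wiring are assumptions for this example, not code from the original source, and the imports match the sketch in the introduction.

// drainEnumerator runs the function returned by blobserverEnumerator
// and consumes every blob it yields. Closing intr would interrupt the
// enumeration; the enumerator never closes dst itself, so the driver
// closes it once the enumerator returns.
func drainEnumerator(ctx *context.Context, src blobserver.BlobEnumerator) error {
	dst := make(chan blob.SizedRef)
	intr := make(chan struct{})
	errc := make(chan error, 1)
	go func() {
		errc <- blobserverEnumerator(ctx, src)(dst, intr)
		close(dst)
	}()
	for sb := range dst {
		_ = sb // process each enumerated blob here
	}
	return <-errc
}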
Example 4: enumerateAllBlobs
func enumerateAllBlobs(s blobserver.Storage, destc chan<- blob.SizedRef) error {
// Use *client.Client's support for enumerating all blobs if
// possible, since it could probably do a better job knowing
// HTTP boundaries and such.
if nh, ok := s.(noHub); ok {
return nh.Client.SimpleEnumerateBlobs(destc)
}
defer close(destc)
return blobserver.EnumerateAll(s, func(sb blob.SizedRef) error {
destc <- sb
return nil
})
}
Example 5: singleBlob
// singleBlob assumes that sto contains a single blob and returns it.
// If there are more or fewer than one blob, it's an error.
func singleBlob(sto blobserver.BlobEnumerator) (ret blob.SizedRef, err error) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
n := 0
if err = blobserver.EnumerateAll(ctx, sto, func(sb blob.SizedRef) error {
ret = sb
n++
return nil
}); err != nil {
return blob.SizedRef{}, err
}
if n != 1 {
return blob.SizedRef{}, fmt.Errorf("saw %d blobs; want 1", n)
}
return
}
Example 6: TestSmallFallback
// See if storage proxies through to small for Fetch, Stat, and Enumerate.
func TestSmallFallback(t *testing.T) {
small := new(test.Fetcher)
s := &storage{
small: small,
large: new(test.Fetcher),
meta: sorted.NewMemoryKeyValue(),
log: test.NewLogger(t, "blobpacked: "),
}
s.init()
b1 := &test.Blob{Contents: "foo"}
b1.MustUpload(t, small)
wantSB := b1.SizedRef()
// Fetch
rc, _, err := s.Fetch(b1.BlobRef())
if err != nil {
t.Errorf("failed to Get blob: %v", err)
} else {
rc.Close()
}
// Stat.
sb, err := blobserver.StatBlob(s, b1.BlobRef())
if err != nil {
t.Errorf("failed to Stat blob: %v", err)
} else if sb != wantSB {
t.Errorf("Stat = %v; want %v", sb, wantSB)
}
// Enumerate
saw := false
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
if err := blobserver.EnumerateAll(ctx, s, func(sb blob.SizedRef) error {
if sb != wantSB {
return fmt.Errorf("saw blob %v; want %v", sb, wantSB)
}
saw = true
return nil
}); err != nil {
t.Errorf("EnuerateAll: %v", err)
}
if !saw {
t.Error("didn't see blob in Enumerate")
}
}
Example 7: enumerateAllBlobs
func enumerateAllBlobs(ctx context.Context, s blobserver.Storage, destc chan<- blob.SizedRef) error {
// Use *client.Client's support for enumerating all blobs if
// possible, since it could probably do a better job knowing
// HTTP boundaries and such.
if c, ok := s.(*client.Client); ok {
return c.SimpleEnumerateBlobs(ctx, destc)
}
defer close(destc)
return blobserver.EnumerateAll(ctx, s, func(sb blob.SizedRef) error {
select {
case destc <- sb:
case <-ctx.Done():
return ctx.Err()
}
return nil
})
}
Example 8: RunOnce
// RunOnce scans a.Source and conditionally creates a new zip.
// It returns ErrSourceTooSmall if there aren't enough blobs on Source.
func (a *Archiver) RunOnce() error {
if a.Source == nil {
return errors.New("archiver: nil Source")
}
if a.Store == nil {
return errors.New("archiver: nil Store func")
}
pz := &potentialZip{a: a}
err := blobserver.EnumerateAll(context.New(), a.Source, func(sb blob.SizedRef) error {
if err := pz.addBlob(sb); err != nil {
return err
}
if pz.bigEnough() {
return errStopEnumerate
}
return nil
})
if err == errStopEnumerate {
err = nil
}
if err != nil {
return err
}
if err := pz.condClose(); err != nil {
return err
}
if !pz.bigEnough() {
return ErrSourceTooSmall
}
if err := a.Store(pz.buf.Bytes(), pz.blobs); err != nil {
return err
}
if a.DeleteSourceAfterStore {
blobs := make([]blob.Ref, 0, len(pz.blobs))
for _, sb := range pz.blobs {
blobs = append(blobs, sb.Ref)
}
if err := a.Source.RemoveBlobs(blobs); err != nil {
return err
}
}
return nil
}
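A hedged usage sketch for the Archiver above: only the fields visible in this example (Source, Store, DeleteSourceAfterStore) and the ErrSourceTooSmall sentinel are used; archiveAll, the io/ioutil import, and the local-file destination are illustrative assumptions.

// archiveAll repeatedly runs the archiver until the remaining blobs
// on src are too small to justify another zip.
func archiveAll(src blobserver.Storage) error {
	a := &Archiver{
		Source: src,
		Store: func(zipData []byte, blobs []blob.SizedRef) error {
			// Persist the zip somewhere durable; a local file is
			// used here purely for illustration.
			return ioutil.WriteFile("archive.zip", zipData, 0600)
		},
		DeleteSourceAfterStore: true,
	}
	for {
		switch err := a.RunOnce(); err {
		case nil:
			// A zip was stored; loop to see if another one fits.
		case ErrSourceTooSmall:
			return nil
		default:
			return err
		}
	}
}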
Example 9: testStorage
func testStorage(t *testing.T, bucketDir string) {
if *bucket == "" || *key == "" || *secret == "" {
t.Skip("Skipping test because at least one of the -s3_key, -s3_secret, or -s3_bucket flags has not been provided.")
}
if !strings.HasPrefix(*bucket, "camlistore-") || !strings.HasSuffix(*bucket, "-test") {
t.Fatalf("bogus bucket name %q; must begin with 'camlistore-' and end in '-test'", *bucket)
}
bucketWithDir := path.Join(*bucket, bucketDir)
storagetest.Test(t, func(t *testing.T) (sto blobserver.Storage, cleanup func()) {
sto, err := newFromConfig(nil, jsonconfig.Obj{
"aws_access_key": *key,
"aws_secret_access_key": *secret,
"bucket": bucketWithDir,
})
if err != nil {
t.Fatalf("newFromConfig error: %v", err)
}
if !testing.Short() {
log.Printf("Warning: this test does many serial operations. Without the go test -short flag, this test will be very slow.")
}
if bucketWithDir != *bucket {
// Adding "a", and "c" objects in the bucket to make sure objects out of the
// "directory" are not touched and have no influence.
for _, key := range []string{"a", "c"} {
var buf bytes.Buffer
md5h := md5.New()
size, err := io.Copy(io.MultiWriter(&buf, md5h), strings.NewReader(key))
if err != nil {
t.Fatalf("could not insert object %s in bucket %v: %v", key, sto.(*s3Storage).bucket, err)
}
if err := sto.(*s3Storage).s3Client.PutObject(
key, sto.(*s3Storage).bucket, md5h, size, &buf); err != nil {
t.Fatalf("could not insert object %s in bucket %v: %v", key, sto.(*s3Storage).bucket, err)
}
}
}
clearBucket := func(beforeTests bool) func() {
return func() {
var all []blob.Ref
if err := blobserver.EnumerateAll(context.TODO(), sto, func(sb blob.SizedRef) error {
	t.Logf("Deleting: %v", sb.Ref)
	all = append(all, sb.Ref)
	return nil
}); err != nil {
	t.Fatalf("EnumerateAll error during cleanup: %v", err)
}
if err := sto.RemoveBlobs(all); err != nil {
t.Fatalf("Error removing blobs during cleanup: %v", err)
}
if beforeTests {
return
}
if bucketWithDir != *bucket {
// checking that "a" and "c" at the root were left untouched.
for _, key := range []string{"a", "c"} {
if _, _, err := sto.(*s3Storage).s3Client.Get(sto.(*s3Storage).bucket, key); err != nil {
t.Fatalf("could not find object %s after tests: %v", key, err)
}
if err := sto.(*s3Storage).s3Client.Delete(sto.(*s3Storage).bucket, key); err != nil {
t.Fatalf("could not remove object %s after tests: %v", key, err)
}
}
}
}
}
clearBucket(true)()
return sto, clearBucket(false)
})
}
Example 10: readAllMetaBlobs
func (s *storage) readAllMetaBlobs() error {
type metaBlob struct {
br *blobref.BlobRef
dat []byte // encrypted blob
err error
}
metac := make(chan metaBlob, 16)
const maxInFlight = 50
var gate = make(chan bool, maxInFlight)
var stopEnumerate = make(chan bool) // closed on error
enumErrc := make(chan error, 1)
go func() {
var wg sync.WaitGroup
enumErrc <- blobserver.EnumerateAll(s.meta, func(sb blobref.SizedBlobRef) error {
select {
case <-stopEnumerate:
return errors.New("enumeration stopped")
default:
}
wg.Add(1)
gate <- true
go func() {
defer wg.Done()
defer func() { <-gate }()
rc, _, err := s.meta.FetchStreaming(sb.BlobRef)
var all []byte
if err == nil {
all, err = ioutil.ReadAll(rc)
rc.Close()
}
metac <- metaBlob{sb.BlobRef, all, err}
}()
return nil
})
wg.Wait()
close(metac)
}()
for mi := range metac {
err := mi.err
if err == nil {
err = s.processEncryptedMetaBlob(mi.br, mi.dat)
}
if err != nil {
close(stopEnumerate)
go func() {
for range metac {
}
}()
// TODO: advertise in this error message a new option or environment variable
// to skip a certain or all meta blobs, to allow partial recovery, if some
// are corrupt. For now, require all to be correct.
return fmt.Errorf("Error with meta blob %v: %v", mi.br, err)
}
}
return <-enumErrc
}
Example 11: testStorage
//......... part of the code omitted .........
if !metadata.OnGCE() {
token, err := oauth2.ReuseTokenSource(nil,
&oauthutil.TokenSource{
Config: config,
CacheFile: *tokenCache,
AuthCode: func() string {
if *authCode == "" {
t.Skipf("Re-run using --auth_code= with the value obtained from %s",
config.AuthCodeURL("", oauth2.AccessTypeOffline, oauth2.ApprovalForce))
return ""
}
return *authCode
},
}).Token()
if err != nil {
t.Fatalf("could not acquire token: %v", err)
}
refreshToken = token.RefreshToken
}
}
bucketWithDir := path.Join(*bucket, bucketDir)
storagetest.TestOpt(t, storagetest.Opts{
New: func(t *testing.T) (sto blobserver.Storage, cleanup func()) {
sto, err := newFromConfig(nil, jsonconfig.Obj{
"bucket": bucketWithDir,
"auth": map[string]interface{}{
"client_id": *clientID,
"client_secret": *clientSecret,
"refresh_token": refreshToken,
},
})
if err != nil {
t.Fatal(err)
}
if !testing.Short() {
log.Printf("Warning: this test does many serial operations. Without the go test -short flag, this test will be very slow.")
}
// Bail if bucket is not empty
ctx := context.Background()
stor := sto.(*Storage)
objs, err := stor.client.Bucket(stor.bucket).List(ctx, nil)
if err != nil {
t.Fatalf("Error checking if bucket is empty: %v", err)
}
if len(objs.Results) != 0 {
t.Fatalf("Refusing to run test: bucket %v is not empty", *bucket)
}
if bucketWithDir != *bucket {
// Adding "a", and "c" objects in the bucket to make sure objects out of the
// "directory" are not touched and have no influence.
for _, key := range []string{"a", "c"} {
w := stor.client.Bucket(stor.bucket).Object(key).NewWriter(ctx)
if _, err := io.Copy(w, strings.NewReader(key)); err != nil {
t.Fatalf("could not insert object %s in bucket %v: %v", key, sto.(*Storage).bucket, err)
}
if err := w.Close(); err != nil {
t.Fatalf("could not insert object %s in bucket %v: %v", key, sto.(*Storage).bucket, err)
}
}
}
clearBucket := func(beforeTests bool) func() {
return func() {
var all []blob.Ref
if err := blobserver.EnumerateAll(context.TODO(), sto, func(sb blob.SizedRef) error {
	t.Logf("Deleting: %v", sb.Ref)
	all = append(all, sb.Ref)
	return nil
}); err != nil {
	t.Fatalf("EnumerateAll error during cleanup: %v", err)
}
if err := sto.RemoveBlobs(all); err != nil {
t.Fatalf("Error removing blobs during cleanup: %v", err)
}
if beforeTests {
return
}
if bucketWithDir != *bucket {
// checking that "a" and "c" at the root were left untouched.
for _, key := range []string{"a", "c"} {
rc, err := stor.client.Bucket(stor.bucket).Object(key).NewReader(ctx)
if err != nil {
t.Fatalf("could not find object %s after tests: %v", key, err)
}
if _, err := io.Copy(ioutil.Discard, rc); err != nil {
t.Fatalf("could not find object %s after tests: %v", key, err)
}
if err := stor.client.Bucket(stor.bucket).Object(key).Delete(ctx); err != nil {
t.Fatalf("could not remove object %s after tests: %v", key, err)
}
}
}
}
}
clearBucket(true)()
return sto, clearBucket(false)
},
})
}
Example 12: TestStreamer
// TestStreamer tests that the BlobStreamer bs implements all of the
// promised interface behavior and ultimately yields the provided
// blobs.
//
// If bs also implements BlobEnumerator, the two are compared for
// consistency.
func TestStreamer(t *testing.T, bs blobserver.BlobStreamer, opts ...StreamerTestOpt) {
var sawEnum map[blob.SizedRef]bool
if enumer, ok := bs.(blobserver.BlobEnumerator); ok {
sawEnum = make(map[blob.SizedRef]bool)
// First do an enumerate over all blobs as a baseline. The Streamer should
// yield the same blobs, even if it's in a different order.
enumCtx := context.New()
defer enumCtx.Cancel()
if err := blobserver.EnumerateAll(enumCtx, enumer, func(sb blob.SizedRef) error {
sawEnum[sb] = true
return nil
}); err != nil {
t.Fatalf("Enumerate: %v", err)
}
}
// See if, without cancelation, it yields the right
// results without errors.
ch := make(chan blobserver.BlobAndToken)
errCh := make(chan error, 1)
go func() {
ctx := context.New()
defer ctx.Cancel()
errCh <- bs.StreamBlobs(ctx, ch, "")
}()
var gotRefs []blob.SizedRef
sawStreamed := map[blob.Ref]int{}
for b := range ch {
sawStreamed[b.Ref()]++
sbr := b.SizedRef()
if sawEnum != nil {
if _, ok := sawEnum[sbr]; ok {
delete(sawEnum, sbr)
} else {
t.Errorf("Streamer yielded blob not returned by Enumerate: %v", sbr)
}
}
gotRefs = append(gotRefs, sbr)
}
if err := <-errCh; err != nil {
t.Errorf("initial uninterrupted StreamBlobs error: %v", err)
}
for br, n := range sawStreamed {
if n > 1 {
t.Errorf("Streamed returned duplicate %v, %d times", br, n)
}
}
nMissing := 0
for sbr := range sawEnum {
t.Errorf("Enumerate found %v but Streamer didn't return it", sbr)
nMissing++
if nMissing == 10 && len(sawEnum) > 10 {
t.Errorf("... etc ...")
break
}
}
for _, opt := range opts {
if err := opt.verify(gotRefs); err != nil {
t.Errorf("error after first uninterrupted StreamBlobs pass: %v", err)
}
}
if t.Failed() {
return
}
// Next, the "complex pass": test a cancelation at each point,
// to test that resume works properly.
//
// Basic strategy:
// -- receive 1 blob, note the blobref, cancel.
// -- start again with that blobref, receive 2, cancel. first should be same,
// second should be new. note its blobref.
// Each iteration should yield 1 new unique blob and all but
// the first and last will return 2 blobs.
wantRefs := append([]blob.SizedRef(nil), gotRefs...) // copy
sawStreamed = map[blob.Ref]int{}
gotRefs = gotRefs[:0]
contToken := ""
for i := 0; i < len(wantRefs); i++ {
ctx := context.New()
ch := make(chan blobserver.BlobAndToken)
errc := make(chan error, 1)
go func() {
errc <- bs.StreamBlobs(ctx, ch, contToken)
}()
nrecv := 0
nextToken := ""
for bt := range ch {
nrecv++
sbr := bt.Blob.SizedRef()
isNew := len(gotRefs) == 0 || sbr != gotRefs[len(gotRefs)-1]
if isNew {
if sawStreamed[sbr.Ref] > 0 {
//......... part of the code omitted .........
Example 13: Reindex
func (x *Index) Reindex() error {
reindexMaxProcs.RLock()
defer reindexMaxProcs.RUnlock()
ctx := context.TODO()
wiper, ok := x.s.(sorted.Wiper)
if !ok {
return fmt.Errorf("index's storage type %T doesn't support sorted.Wiper", x.s)
}
log.Printf("Wiping index storage type %T ...", x.s)
if err := wiper.Wipe(); err != nil {
return fmt.Errorf("error wiping index's sorted key/value type %T: %v", x.s, err)
}
log.Printf("Index wiped. Rebuilding...")
reindexStart, _ := blob.Parse(os.Getenv("CAMLI_REINDEX_START"))
err := x.s.Set(keySchemaVersion.name, fmt.Sprintf("%d", requiredSchemaVersion))
if err != nil {
return err
}
var nerrmu sync.Mutex
nerr := 0
blobc := make(chan blob.Ref, 32)
enumCtx := context.TODO()
enumErr := make(chan error, 1)
go func() {
defer close(blobc)
donec := enumCtx.Done()
var lastTick time.Time
enumErr <- blobserver.EnumerateAll(enumCtx, x.blobSource, func(sb blob.SizedRef) error {
now := time.Now()
if lastTick.Before(now.Add(-1 * time.Second)) {
log.Printf("Reindexing at %v", sb.Ref)
lastTick = now
}
if reindexStart.Valid() && sb.Ref.Less(reindexStart) {
return nil
}
select {
case <-donec:
return enumCtx.Err()
case blobc <- sb.Ref:
return nil
}
})
}()
var wg sync.WaitGroup
for i := 0; i < reindexMaxProcs.v; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for br := range blobc {
if err := x.indexBlob(br); err != nil {
log.Printf("Error reindexing %v: %v", br, err)
nerrmu.Lock()
nerr++
nerrmu.Unlock()
// TODO: flag (or default?) to stop the EnumerateAll above once
// there's any error with reindexing?
}
}
}()
}
if err := <-enumErr; err != nil {
return err
}
wg.Wait()
x.mu.Lock()
readyCount := len(x.readyReindex)
x.mu.Unlock()
if readyCount > 0 {
return fmt.Errorf("%d blobs were ready to reindex in out-of-order queue, but not yet ran", readyCount)
}
log.Printf("Index rebuild complete.")
nerrmu.Lock() // no need to unlock
if nerr != 0 {
return fmt.Errorf("%d blobs failed to re-index", nerr)
}
if err := x.initDeletesCache(); err != nil {
return err
}
return nil
}
Example 14: TestForeachZipBlob
func TestForeachZipBlob(t *testing.T) {
const fileSize = 2 << 20
const fileName = "foo.dat"
fileContents := randBytes(fileSize)
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
pt := testPack(t,
func(sto blobserver.Storage) error {
_, err := schema.WriteFileFromReader(sto, fileName, bytes.NewReader(fileContents))
return err
},
wantNumLargeBlobs(1),
wantNumSmallBlobs(0),
)
zipBlob, err := singleBlob(pt.large)
if err != nil {
t.Fatal(err)
}
zipBytes := slurpBlob(t, pt.large, zipBlob.Ref)
zipSize := len(zipBytes)
all := map[blob.Ref]blob.SizedRef{}
if err := blobserver.EnumerateAll(ctx, pt.logical, func(sb blob.SizedRef) error {
all[sb.Ref] = sb
return nil
}); err != nil {
t.Fatal(err)
}
foreachSaw := 0
blobSizeSum := 0
if err := pt.sto.foreachZipBlob(zipBlob.Ref, func(bap BlobAndPos) error {
foreachSaw++
blobSizeSum += int(bap.Size)
want, ok := all[bap.Ref]
if !ok {
t.Errorf("unwanted blob ref returned from foreachZipBlob: %v", bap.Ref)
return nil
}
delete(all, bap.Ref)
if want.Size != bap.Size {
t.Errorf("for %v, foreachZipBlob size = %d; want %d", bap.Ref, bap.Size, want.Size)
return nil
}
// Verify the offset.
h := bap.Ref.Hash()
h.Write(zipBytes[bap.Offset : bap.Offset+int64(bap.Size)])
if !bap.Ref.HashMatches(h) {
return fmt.Errorf("foreachZipBlob returned blob %v at offset %d that failed validation", bap.Ref, bap.Offset)
}
return nil
}); err != nil {
t.Fatal(err)
}
t.Logf("foreachZipBlob enumerated %d blobs", foreachSaw)
if len(all) > 0 {
t.Errorf("foreachZipBlob forgot to enumerate %d blobs: %v", len(all), all)
}
// Calculate per-blobref zip overhead (zip file headers/TOC/manifest file, etc)
zipOverhead := zipSize - blobSizeSum
t.Logf("zip fixed overhead = %d bytes, for %d blobs (%d bytes each)", zipOverhead, foreachSaw, zipOverhead/foreachSaw)
}
Example 15: testPack
func testPack(t *testing.T,
write func(sto blobserver.Storage) error,
checks ...func(*packTest),
) *packTest {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
logical := new(test.Fetcher)
small, large := new(test.Fetcher), new(test.Fetcher)
pt := &packTest{
logical: logical,
small: small,
large: large,
}
// Figure out the logical baseline blobs we'll later expect in the packed storage.
if err := write(logical); err != nil {
t.Fatal(err)
}
t.Logf("items in logical storage: %d", logical.NumBlobs())
pt.sto = &storage{
small: small,
large: large,
meta: sorted.NewMemoryKeyValue(),
log: test.NewLogger(t, "blobpacked: "),
}
pt.sto.init()
for _, setOpt := range checks {
setOpt(pt)
}
if err := write(pt.sto); err != nil {
t.Fatal(err)
}
t.Logf("items in small: %v", small.NumBlobs())
t.Logf("items in large: %v", large.NumBlobs())
if want, ok := pt.wantLargeBlobs.(int); ok && want != large.NumBlobs() {
t.Fatalf("num large blobs = %d; want %d", large.NumBlobs(), want)
}
if want, ok := pt.wantSmallBlobs.(int); ok && want != small.NumBlobs() {
t.Fatalf("num small blobs = %d; want %d", small.NumBlobs(), want)
}
var zipRefs []blob.Ref
var zipSeen = map[blob.Ref]bool{}
if err := blobserver.EnumerateAll(ctx, large, func(sb blob.SizedRef) error {
	zipRefs = append(zipRefs, sb.Ref)
	zipSeen[sb.Ref] = true
	return nil
}); err != nil {
	t.Fatalf("EnumerateAll: %v", err)
}
if len(zipRefs) != large.NumBlobs() {
t.Fatalf("Enumerated only %d zip files; expected %d", len(zipRefs), large.NumBlobs())
}
bytesOfZip := map[blob.Ref][]byte{}
for _, zipRef := range zipRefs {
rc, _, err := large.Fetch(zipRef)
if err != nil {
t.Fatal(err)
}
zipBytes, err := ioutil.ReadAll(rc)
rc.Close()
if err != nil {
t.Fatalf("Error slurping %s: %v", zipRef, err)
}
if len(zipBytes) > constants.MaxBlobSize {
t.Fatalf("zip is too large: %d > max %d", len(zipBytes), constants.MaxBlobSize)
}
bytesOfZip[zipRef] = zipBytes
zr, err := zip.NewReader(bytes.NewReader(zipBytes), int64(len(zipBytes)))
if err != nil {
t.Fatalf("Error reading resulting zip file: %v", err)
}
if len(zr.File) == 0 {
t.Fatal("zip is empty")
}
nameSeen := map[string]bool{}
for i, zf := range zr.File {
if nameSeen[zf.Name] {
t.Errorf("duplicate name %q seen", zf.Name)
}
nameSeen[zf.Name] = true
t.Logf("zip[%d] size %d, %v", i, zf.UncompressedSize64, zf.Name)
}
mfr, err := zr.File[len(zr.File)-1].Open()
if err != nil {
t.Fatalf("Error opening manifest JSON: %v", err)
}
maniJSON, err := ioutil.ReadAll(mfr)
if err != nil {
t.Fatalf("Error reading manifest JSON: %v", err)
}
var mf Manifest
if err := json.Unmarshal(maniJSON, &mf); err != nil {
t.Fatalf("invalid JSON: %v", err)
}
//......... part of the code omitted .........