本文整理匯總了Golang中camlistore.org/pkg/test.NewLogger函數的典型用法代碼示例。如果您正苦於以下問題:Golang NewLogger函數的具體用法?Golang NewLogger怎麽用?Golang NewLogger使用的例子?那麽, 這裏精選的函數代碼示例或許可以為您提供幫助。
在下文中一共展示了NewLogger函數的8個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Golang代碼示例。
示例1: TestStorage
func TestStorage(t *testing.T) {
storagetest.Test(t, func(t *testing.T) (sto blobserver.Storage, cleanup func()) {
s := &storage{
small: new(test.Fetcher),
large: new(test.Fetcher),
meta: sorted.NewMemoryKeyValue(),
log: test.NewLogger(t, "blobpacked: "),
}
s.init()
return s, func() {}
})
}
示例2: testStreamBlobs
// testStreamBlobs builds a blobpacked storage over the given small and
// large stores, lets populate fill it (returning streamer test options),
// and then verifies the blob-streaming contract via storagetest.
func testStreamBlobs(t *testing.T,
	small blobserver.Storage,
	large subFetcherStorage,
	populate func(*testing.T, *storage) []storagetest.StreamerTestOpt) {
	sto := &storage{
		small: small,
		large: large,
		meta:  sorted.NewMemoryKeyValue(),
		log:   test.NewLogger(t, "blobpacked: "),
	}
	sto.init()
	opts := populate(t, sto)
	storagetest.TestStreamer(t, sto, opts...)
}
示例3: TestStorageNoSmallSubfetch
// TestStorageNoSmallSubfetch runs the storage test suite with the small
// store's SubFetcher capability hidden, so *storage's own SubFetch is
// exercised instead of being delegated to the underlying small store.
func TestStorageNoSmallSubfetch(t *testing.T) {
	storagetest.Test(t, func(t *testing.T) (blobserver.Storage, func()) {
		// hideSubFetcher wraps the fetcher so it no longer advertises the
		// SubFetcher interface; *storage only delegates SubFetch to small
		// when small implements that interface.
		sto := &storage{
			small: hideSubFetcher(new(test.Fetcher)),
			large: new(test.Fetcher),
			meta:  sorted.NewMemoryKeyValue(),
			log:   test.NewLogger(t, "blobpacked: "),
		}
		sto.init()
		return sto, func() {}
	})
}
示例4: TestSmallFallback
// see if storage proxies through to small for Fetch, Stat, and Enumerate.
func TestSmallFallback(t *testing.T) {
small := new(test.Fetcher)
s := &storage{
small: small,
large: new(test.Fetcher),
meta: sorted.NewMemoryKeyValue(),
log: test.NewLogger(t, "blobpacked: "),
}
s.init()
b1 := &test.Blob{"foo"}
b1.MustUpload(t, small)
wantSB := b1.SizedRef()
// Fetch
rc, _, err := s.Fetch(b1.BlobRef())
if err != nil {
t.Errorf("failed to Get blob: %v", err)
} else {
rc.Close()
}
// Stat.
sb, err := blobserver.StatBlob(s, b1.BlobRef())
if err != nil {
t.Errorf("failed to Stat blob: %v", err)
} else if sb != wantSB {
t.Errorf("Stat = %v; want %v", sb, wantSB)
}
// Enumerate
saw := false
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
if err := blobserver.EnumerateAll(ctx, s, func(sb blob.SizedRef) error {
if sb != wantSB {
return fmt.Errorf("saw blob %v; want %v", sb, wantSB)
}
saw = true
return nil
}); err != nil {
t.Errorf("EnuerateAll: %v", err)
}
if !saw {
t.Error("didn't see blob in Enumerate")
}
}
示例5: TestStreamBlobs
func TestStreamBlobs(t *testing.T) {
small := new(test.Fetcher)
s := &storage{
small: small,
large: new(test.Fetcher),
meta: sorted.NewMemoryKeyValue(),
log: test.NewLogger(t, "blobpacked: "),
}
s.init()
all := map[blob.Ref]bool{}
const nBlobs = 10
for i := 0; i < nBlobs; i++ {
b := &test.Blob{strconv.Itoa(i)}
b.MustUpload(t, small)
all[b.BlobRef()] = true
}
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
token := "" // beginning
got := map[blob.Ref]bool{}
dest := make(chan blobserver.BlobAndToken, 16)
done := make(chan bool)
go func() {
defer close(done)
for bt := range dest {
got[bt.Blob.Ref()] = true
}
}()
err := s.StreamBlobs(ctx, dest, token)
if err != nil {
t.Fatalf("StreamBlobs = %v", err)
}
<-done
if !reflect.DeepEqual(got, all) {
t.Errorf("Got blobs %v; want %v", got, all)
}
storagetest.TestStreamer(t, s, storagetest.WantN(nBlobs))
}
示例6: TestPackerBoundarySplits
// TestPackerBoundarySplits exercises the packer's misprediction path near
// the 16 MB zip boundary by binary-searching the size of a third chunk
// until packing produces two zip files. (The function body continues past
// this view; the binary-search driver is omitted below.)
func TestPackerBoundarySplits(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping slow test")
	}
	// Test a file of three chunk sizes, totalling near the 16 MB
	// boundary:
	// - 1st chunk is 6 MB. ("blobA")
	// - 2nd chunk is 6 MB. ("blobB")
	// - 3rd chunk ("blobC") is binary-searched (up to 4MB) to find
	// which size causes the packer to write two zip files.

	// During the test we set zip overhead boundaries to 0, to
	// force the test to into its pathological misprediction code paths,
	// where it needs to back up and rewrite the zip with one part less.
	// That's why the test starts with two zip files: so there's at
	// least one that can be removed to make room.
	defer setIntTemporarily(&zipPerEntryOverhead, 0)()
	const sizeAB = 12 << 20
	const maxBlobSize = 16 << 20
	bytesAB := randBytes(sizeAB)
	blobA := &test.Blob{string(bytesAB[:sizeAB/2])}
	blobB := &test.Blob{string(bytesAB[sizeAB/2:])}
	refA := blobA.BlobRef()
	refB := blobB.BlobRef()
	bytesCFull := randBytes(maxBlobSize - sizeAB) // will be sliced down

	// Mechanism to verify we hit the back-up code path:
	// the hooks below record whether the packer truncated after blobB and
	// whether it stopped before overflowing the max zip size.
	var (
		mu                    sync.Mutex
		sawTruncate           blob.Ref
		stoppedBeforeOverflow bool
	)
	testHookSawTruncate = func(after blob.Ref) {
		if after != refB {
			t.Errorf("unexpected truncate point %v", after)
		}
		mu.Lock()
		defer mu.Unlock()
		sawTruncate = after
	}
	testHookStopBeforeOverflowing = func() {
		mu.Lock()
		defer mu.Unlock()
		stoppedBeforeOverflow = true
	}
	defer func() {
		// Clear the package-level hooks so later tests aren't affected.
		testHookSawTruncate = nil
		testHookStopBeforeOverflowing = nil
	}()

	// generatesTwoZips reports whether packing a file whose third chunk
	// is sizeC bytes long makes the packer write two zip files.
	generatesTwoZips := func(sizeC int) (ret bool) {
		large := new(test.Fetcher)
		s := &storage{
			small: new(test.Fetcher),
			large: large,
			meta:  sorted.NewMemoryKeyValue(),
			log: test.NewLogger(t, "blobpacked: ",
				// Ignore these phrases:
				"Packing file ",
				"Packed file ",
			),
		}
		s.init()
		// Upload first two chunks
		blobA.MustUpload(t, s)
		blobB.MustUpload(t, s)
		// Upload the third chunk ("blobC"), sliced down to sizeC bytes.
		bytesC := bytesCFull[:sizeC]
		h := blob.NewHash()
		h.Write(bytesC)
		refC := blob.RefFromHash(h)
		_, err := s.ReceiveBlob(refC, bytes.NewReader(bytesC))
		if err != nil {
			t.Fatal(err)
		}
		// Upload the file schema blob.
		m := schema.NewFileMap("foo.dat")
		m.PopulateParts(sizeAB+int64(sizeC), []schema.BytesPart{
			schema.BytesPart{
				Size:    sizeAB / 2,
				BlobRef: refA,
			},
			schema.BytesPart{
				Size:    sizeAB / 2,
				BlobRef: refB,
			},
			schema.BytesPart{
				Size:    uint64(sizeC),
				BlobRef: refC,
			},
		})
		fjson, err := m.JSON()
		if err != nil {
			t.Fatalf("schema filemap JSON: %v", err)
		}
		fb := &test.Blob{Contents: fjson}
//.........這裏部分代碼省略.........
示例7: TestRemoveBlobs
// TestRemoveBlobs checks deletion of packed blobs: that an in-use zip pack
// cannot be deleted, that individual logical blobs can be removed one at a
// time, and that "d:" (deletion) rows appear in the meta index and are
// cleaned up once the zip pack itself is deleted.
func TestRemoveBlobs(t *testing.T) {
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	// The basic small cases are handled via storagetest in TestStorage,
	// so this only tests removing packed blobs.
	small := new(test.Fetcher)
	large := new(test.Fetcher)
	sto := &storage{
		small: small,
		large: large,
		meta:  sorted.NewMemoryKeyValue(),
		log:   test.NewLogger(t, "blobpacked: "),
	}
	sto.init()
	// Write a 1 MB file through sto so its chunks get packed into large.
	const fileSize = 1 << 20
	fileContents := randBytes(fileSize)
	if _, err := schema.WriteFileFromReader(sto, "foo.dat", bytes.NewReader(fileContents)); err != nil {
		t.Fatal(err)
	}
	// Everything should have been packed: small empty, large non-empty.
	if small.NumBlobs() != 0 || large.NumBlobs() == 0 {
		t.Fatalf("small, large counts == %d, %d; want 0, non-zero", small.NumBlobs(), large.NumBlobs())
	}
	// Snapshot every logical blob currently enumerable from sto.
	var all []blob.SizedRef
	if err := blobserver.EnumerateAll(ctx, sto, func(sb blob.SizedRef) error {
		all = append(all, sb)
		return nil
	}); err != nil {
		t.Fatal(err)
	}
	// Find the zip
	zipBlob, err := singleBlob(sto.large)
	if err != nil {
		t.Fatalf("failed to find packed zip: %v", err)
	}
	// The zip file is in use, so verify we can't delete it.
	if err := sto.deleteZipPack(zipBlob.Ref); err == nil {
		t.Fatalf("zip pack blob deleted but it should not have been allowed")
	}
	// Delete everything, one blob at a time, checking after each removal
	// that enumeration returns exactly the remaining set.
	for len(all) > 0 {
		del := all[0].Ref
		all = all[1:]
		if err := sto.RemoveBlobs([]blob.Ref{del}); err != nil {
			t.Fatalf("RemoveBlobs: %v", err)
		}
		if err := storagetest.CheckEnumerate(sto, all); err != nil {
			t.Fatalf("After deleting %v, %v", del, err)
		}
	}
	// dRows counts the "d:"-prefixed rows in the meta index.
	dRows := func() (n int) {
		if err := sorted.ForeachInRange(sto.meta, "d:", "", func(key, value string) error {
			if strings.HasPrefix(key, "d:") {
				n++
			}
			return nil
		}); err != nil {
			t.Fatalf("meta iteration error: %v", err)
		}
		return
	}
	// The deletions above must have produced at least one "d:" row.
	if n := dRows(); n == 0 {
		t.Fatalf("expected a 'd:' row after deletes")
	}
	// TODO: test the background pack-deleter loop? figure out its design first.
	// With all logical blobs gone, deleting the zip pack should now succeed.
	if err := sto.deleteZipPack(zipBlob.Ref); err != nil {
		t.Errorf("error deleting zip %v: %v", zipBlob.Ref, err)
	}
	// Deleting the pack should also have removed its "d:" rows.
	if n := dRows(); n != 0 {
		t.Errorf("expected the 'd:' row to be deleted")
	}
}
示例8: testPack
// testPack drives a packing scenario: write first populates a logical
// baseline storage, then the same writes are replayed through a fresh
// blobpacked storage; the resulting small/large blob counts and the zip
// layout are validated. checks are options applied to the packTest before
// the replay. (The function body continues past this view.)
func testPack(t *testing.T,
	write func(sto blobserver.Storage) error,
	checks ...func(*packTest),
) *packTest {
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	logical := new(test.Fetcher)
	small, large := new(test.Fetcher), new(test.Fetcher)
	pt := &packTest{
		logical: logical,
		small:   small,
		large:   large,
	}
	// Figure out the logical baseline blobs we'll later expect in the packed storage.
	if err := write(logical); err != nil {
		t.Fatal(err)
	}
	t.Logf("items in logical storage: %d", logical.NumBlobs())
	pt.sto = &storage{
		small: small,
		large: large,
		meta:  sorted.NewMemoryKeyValue(),
		log:   test.NewLogger(t, "blobpacked: "),
	}
	pt.sto.init()
	// Apply caller-supplied expectations (e.g. wantLargeBlobs/wantSmallBlobs).
	for _, setOpt := range checks {
		setOpt(pt)
	}
	// Replay the same writes through the packing storage.
	if err := write(pt.sto); err != nil {
		t.Fatal(err)
	}
	t.Logf("items in small: %v", small.NumBlobs())
	t.Logf("items in large: %v", large.NumBlobs())
	if want, ok := pt.wantLargeBlobs.(int); ok && want != large.NumBlobs() {
		t.Fatalf("num large blobs = %d; want %d", large.NumBlobs(), want)
	}
	if want, ok := pt.wantSmallBlobs.(int); ok && want != small.NumBlobs() {
		t.Fatalf("num small blobs = %d; want %d", small.NumBlobs(), want)
	}
	var zipRefs []blob.Ref
	var zipSeen = map[blob.Ref]bool{}
	// NOTE(review): EnumerateAll's error is ignored here — presumably safe
	// for an in-memory test.Fetcher, but worth confirming.
	blobserver.EnumerateAll(ctx, large, func(sb blob.SizedRef) error {
		zipRefs = append(zipRefs, sb.Ref)
		zipSeen[sb.Ref] = true
		return nil
	})
	if len(zipRefs) != large.NumBlobs() {
		t.Fatalf("Enumerated only %d zip files; expected %d", len(zipRefs), large.NumBlobs())
	}
	// Validate each packed zip: size bound, readability, unique entry
	// names, and a parseable manifest.
	bytesOfZip := map[blob.Ref][]byte{}
	for _, zipRef := range zipRefs {
		rc, _, err := large.Fetch(zipRef)
		if err != nil {
			t.Fatal(err)
		}
		zipBytes, err := ioutil.ReadAll(rc)
		rc.Close()
		if err != nil {
			t.Fatalf("Error slurping %s: %v", zipRef, err)
		}
		if len(zipBytes) > constants.MaxBlobSize {
			t.Fatalf("zip is too large: %d > max %d", len(zipBytes), constants.MaxBlobSize)
		}
		bytesOfZip[zipRef] = zipBytes
		zr, err := zip.NewReader(bytes.NewReader(zipBytes), int64(len(zipBytes)))
		if err != nil {
			t.Fatalf("Error reading resulting zip file: %v", err)
		}
		if len(zr.File) == 0 {
			t.Fatal("zip is empty")
		}
		nameSeen := map[string]bool{}
		for i, zf := range zr.File {
			if nameSeen[zf.Name] {
				t.Errorf("duplicate name %q seen", zf.Name)
			}
			nameSeen[zf.Name] = true
			t.Logf("zip[%d] size %d, %v", i, zf.UncompressedSize64, zf.Name)
		}
		// The manifest JSON is read from the last file entry of the zip.
		mfr, err := zr.File[len(zr.File)-1].Open()
		if err != nil {
			t.Fatalf("Error opening manifest JSON: %v", err)
		}
		maniJSON, err := ioutil.ReadAll(mfr)
		if err != nil {
			t.Fatalf("Error reading manifest JSON: %v", err)
		}
		var mf Manifest
		if err := json.Unmarshal(maniJSON, &mf); err != nil {
			t.Fatalf("invalid JSON: %v", err)
		}
//.........這裏部分代碼省略.........