This article collects typical usage examples of the NewGate function from the Go package camlistore.org/pkg/syncutil. If you are wondering what NewGate does and how to use it, the curated examples below may help.
In total, 15 code examples of NewGate are shown, sorted by popularity by default.
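All of the examples share the same pattern: NewGate(n) returns a *syncutil.Gate that acts as a counting semaphore, each worker calls Start() before doing work (blocking while n workers already hold the gate) and Done() when finished. The following is a minimal sketch of that pattern, not code from Camlistore itself; the fetchURL helper and the URL list are made up for illustration, while syncutil.NewGate, Start, and Done are the real camlistore.org API.
package main

import (
	"log"
	"net/http"
	"sync"

	"camlistore.org/pkg/syncutil"
)

// fetchURL is a hypothetical helper used only for this sketch.
func fetchURL(url string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

func main() {
	urls := []string{"https://example.com/a", "https://example.com/b"}

	gate := syncutil.NewGate(5) // at most 5 fetches in flight
	var wg sync.WaitGroup
	for _, u := range urls {
		u := u
		gate.Start() // blocks while 5 goroutines hold the gate
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer gate.Done() // release the slot for the next worker
			if err := fetchURL(u); err != nil {
				log.Printf("fetch %s: %v", u, err)
			}
		}()
	}
	wg.Wait()
}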
Example 1: newUploader
func newUploader() *Uploader {
cc := client.NewOrFail()
if !*cmdmain.FlagVerbose {
cc.SetLogger(nil)
}
proxy := http.ProxyFromEnvironment
if flagProxyLocal {
proxy = proxyFromEnvironment
}
tr := cc.TransportForConfig(
&client.TransportConfig{
Proxy: proxy,
Verbose: *flagHTTP,
})
httpStats, _ := tr.(*httputil.StatsTransport)
cc.SetHTTPClient(&http.Client{Transport: tr})
pwd, err := os.Getwd()
if err != nil {
log.Fatalf("os.Getwd: %v", err)
}
return &Uploader{
Client: cc,
transport: httpStats,
pwd: pwd,
fdGate: syncutil.NewGate(100), // gate things that waste fds, assuming a low system limit
}
}
Example 2: runFullValidation
func (sh *SyncHandler) runFullValidation() {
var wg sync.WaitGroup
sh.mu.Lock()
shards := sh.vshards
wg.Add(len(shards))
sh.mu.Unlock()
sh.logf("full validation beginning with %d shards", len(shards))
const maxShardWorkers = 30 // arbitrary
gate := syncutil.NewGate(maxShardWorkers)
for _, pfx := range shards {
pfx := pfx
gate.Start()
go func() {
defer wg.Done()
defer gate.Done()
sh.validateShardPrefix(pfx)
}()
}
wg.Wait()
sh.logf("Validation complete")
}
Example 3: StatBlobs
func (s *Storage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
// TODO: use cache
var grp syncutil.Group
gate := syncutil.NewGate(20) // arbitrary cap
for i := range blobs {
br := blobs[i]
gate.Start()
grp.Go(func() error {
defer gate.Done()
size, exists, err := s.client.StatObject(
&googlestorage.Object{Bucket: s.bucket, Key: s.dirPrefix + br.String()})
if err != nil {
return err
}
if !exists {
return nil
}
if size > constants.MaxBlobSize {
return fmt.Errorf("blob %s stat size too large (%d)", br, size)
}
dest <- blob.SizedRef{Ref: br, Size: uint32(size)}
return nil
})
}
return grp.Err()
}
Example 4: RemoveBlobs
func (s *storage) RemoveBlobs(blobs []blob.Ref) error {
// Plan:
// -- delete from small (if it's there)
// -- if in big, update the meta index to note that it's there, but deleted.
// -- fetch big's zip file (constructed from a ReaderAt that is all dummy zeros +
// the zip's TOC only, relying on big being a SubFetcher, and keeping info in
// the meta about the offset of the TOC+total size of each big's zip)
// -- iterate over the zip's blobs (at some point). If all are marked deleted, actually RemoveBlob
// on big to delete the full zip and then delete all the meta rows.
var (
mu sync.Mutex
unpacked []blob.Ref
packed []blob.Ref
large = map[blob.Ref]bool{} // the large blobs that packed are in
)
var grp syncutil.Group
delGate := syncutil.NewGate(removeLookups)
for _, br := range blobs {
br := br
delGate.Start()
grp.Go(func() error {
defer delGate.Done()
m, err := s.getMetaRow(br)
if err != nil {
return err
}
mu.Lock()
defer mu.Unlock()
if m.isPacked() {
packed = append(packed, br)
large[m.largeRef] = true
} else {
unpacked = append(unpacked, br)
}
return nil
})
}
if err := grp.Err(); err != nil {
return err
}
if len(unpacked) > 0 {
grp.Go(func() error {
return s.small.RemoveBlobs(unpacked)
})
}
if len(packed) > 0 {
grp.Go(func() error {
bm := s.meta.BeginBatch()
now := time.Now()
for zipRef := range large {
bm.Set("d:"+zipRef.String(), fmt.Sprint(now.Unix()))
}
for _, br := range packed {
bm.Delete("b:" + br.String())
}
return s.meta.CommitBatch(bm)
})
}
return grp.Err()
}
Example 5: iterItems
func iterItems(itemch chan<- imageFile, errch chan<- error,
filter filterFunc, client *http.Client, username string) {
defer close(itemch)
albums, err := picago.GetAlbums(client, username)
if err != nil {
errch <- err
return
}
gate := syncutil.NewGate(parallelAlbumRoutines)
for _, album := range albums {
photos, err := picago.GetPhotos(client, username, album.ID)
if err != nil {
select {
case errch <- err:
default:
return
}
continue
}
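// Acquire a gate slot before spawning the per-album worker; the
// deferred Done below releases it when the album is finished.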
gate.Start()
go func(albumName, albumTitle string) {
defer gate.Done()
for _, photo := range photos {
img := imageFile{
albumTitle: albumTitle,
albumName: albumName,
fileName: photo.Filename(),
ID: photo.ID,
}
ok, err := filter(img)
if err != nil {
errch <- err
return
}
if !ok {
continue
}
img.r, err = picago.DownloadPhoto(client, photo.URL)
if err != nil {
select {
case errch <- fmt.Errorf("Get(%s): %v", photo.URL, err):
default:
return
}
continue
}
itemch <- img
}
}(album.Name, album.Title)
}
}
Example 6: NewService
// NewService builds a new Service. Zero timeout or maxProcs means no limit.
func NewService(th Thumbnailer, timeout time.Duration, maxProcs int) *Service {
var g *syncutil.Gate
if maxProcs > 0 {
g = syncutil.NewGate(maxProcs)
}
return &Service{
thumbnailer: th,
timeout: timeout,
gate: g,
}
}
Example 7: Run
func (im imp) Run(ctx *importer.RunContext) (err error) {
log.Printf("pinboard: Running importer.")
r := &run{
RunContext: ctx,
im: im,
postGate: syncutil.NewGate(3),
nextCursor: time.Now().Format(timeFormat),
nextAfter: time.Now(),
lastPause: pauseInterval,
}
_, err = r.importPosts()
log.Printf("pinboard: Importer returned %v.", err)
return
}
Example 8: newFromParams
func newFromParams(server string, mode auth.AuthMode) *Client {
httpClient := &http.Client{
Transport: &http.Transport{
MaxIdleConnsPerHost: maxParallelHTTP,
},
}
return &Client{
server: server,
httpClient: httpClient,
httpGate: syncutil.NewGate(maxParallelHTTP),
haveCache: noHaveCache{},
log: log.New(os.Stderr, "", log.Ldate|log.Ltime),
authMode: mode,
}
}
Example 9: RemoveBlobs
func (s *Storage) RemoveBlobs(blobs []blob.Ref) error {
if s.cache != nil {
s.cache.RemoveBlobs(blobs)
}
gate := syncutil.NewGate(50) // arbitrary
var grp syncutil.Group
for i := range blobs {
gate.Start()
br := blobs[i]
grp.Go(func() error {
defer gate.Done()
return s.client.DeleteObject(&googlestorage.Object{Bucket: s.bucket, Key: s.dirPrefix + br.String()})
})
}
return grp.Err()
}
Example 10: Run
func (imp) Run(ctx *importer.RunContext) error {
clientId, secret, err := ctx.Credentials()
if err != nil {
return err
}
acctNode := ctx.AccountNode()
ocfg := baseOAuthConfig
ocfg.ClientId, ocfg.ClientSecret = clientId, secret
token := decodeToken(acctNode.Attr(acctAttrOAuthToken))
transport := &oauth.Transport{
Config: &ocfg,
Token: &token,
Transport: notOAuthTransport(ctx.HTTPClient()),
}
ctx.Context = ctx.Context.New(context.WithHTTPClient(transport.Client()))
root := ctx.RootNode()
if root.Attr(nodeattr.Title) == "" {
if err := root.SetAttr(nodeattr.Title,
fmt.Sprintf("%s %s - Google/Picasa Photos",
acctNode.Attr(importer.AcctAttrGivenName),
acctNode.Attr(importer.AcctAttrFamilyName))); err != nil {
return err
}
}
r := &run{
RunContext: ctx,
incremental: !forceFullImport && acctNode.Attr(importer.AcctAttrCompletedVersion) == runCompleteVersion,
photoGate: syncutil.NewGate(3),
}
if err := r.importAlbums(); err != nil {
return err
}
r.mu.Lock()
anyErr := r.anyErr
r.mu.Unlock()
if !anyErr {
if err := acctNode.SetAttrs(importer.AcctAttrCompletedVersion, runCompleteVersion); err != nil {
return err
}
}
return nil
}
Example 11: newUploader
func newUploader() *Uploader {
var cc *client.Client
var httpStats *httputil.StatsTransport
if d := *flagBlobDir; d != "" {
ss, err := dir.New(d)
if err != nil && d == "discard" {
ss = discardStorage{}
err = nil
}
if err != nil {
log.Fatalf("Error using dir %s as storage: %v", d, err)
}
cc = client.NewStorageClient(ss)
} else {
cc = client.NewOrFail()
proxy := http.ProxyFromEnvironment
if flagProxyLocal {
proxy = proxyFromEnvironment
}
tr := cc.TransportForConfig(
&client.TransportConfig{
Proxy: proxy,
Verbose: *flagHTTP,
})
httpStats, _ = tr.(*httputil.StatsTransport)
cc.SetHTTPClient(&http.Client{Transport: tr})
}
if *cmdmain.FlagVerbose {
cc.SetLogger(log.New(cmdmain.Stderr, "", log.LstdFlags))
} else {
cc.SetLogger(nil)
}
pwd, err := os.Getwd()
if err != nil {
log.Fatalf("os.Getwd: %v", err)
}
return &Uploader{
Client: cc,
transport: httpStats,
pwd: pwd,
fdGate: syncutil.NewGate(100), // gate things that waste fds, assuming a low system limit
}
}
Example 12: loadAllChunksSync
func (fr *FileReader) loadAllChunksSync() {
gate := syncutil.NewGate(20) // num readahead chunk loads at a time
fr.ForeachChunk(func(_ []blob.Ref, p BytesPart) error {
if !p.BlobRef.Valid() {
return nil
}
gate.Start()
go func(br blob.Ref) {
defer gate.Done()
rc, _, err := fr.fetcher.Fetch(br)
if err == nil {
defer rc.Close()
var b [1]byte
rc.Read(b[:]) // fault in the blob
}
}(p.BlobRef)
return nil
})
}
Example 13: TestPackTwoIdenticalfiles
func TestPackTwoIdenticalfiles(t *testing.T) {
const fileSize = 1 << 20
fileContents := randBytes(fileSize)
testPack(t,
func(sto blobserver.Storage) (err error) {
if _, err = schema.WriteFileFromReader(sto, "a.txt", bytes.NewReader(fileContents)); err != nil {
return
}
if _, err = schema.WriteFileFromReader(sto, "b.txt", bytes.NewReader(fileContents)); err != nil {
return
}
return
},
func(pt *packTest) { pt.sto.packGate = syncutil.NewGate(1) }, // one pack at a time
wantNumLargeBlobs(1),
wantNumSmallBlobs(1), // just the "b.txt" file schema blob
okayWithoutMeta("sha1-cb4399f6b3b31ace417e1ec9326f9818bb3f8387"),
)
}
Example 14: StatBlobs
func (s *storage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
if len(blobs) == 0 {
return nil
}
var (
grp syncutil.Group
trySmallMu sync.Mutex
trySmall []blob.Ref
)
statGate := syncutil.NewGate(50) // arbitrary
for _, br := range blobs {
br := br
statGate.Start()
grp.Go(func() error {
defer statGate.Done()
m, err := s.getMetaRow(br)
if err != nil {
return err
}
if m.exists {
dest <- blob.SizedRef{Ref: br, Size: m.size}
} else {
trySmallMu.Lock()
trySmall = append(trySmall, br)
// not found in the meta index; fall back to the small storage below
trySmallMu.Unlock()
}
return nil
})
}
if err := grp.Err(); err != nil {
return err
}
if len(trySmall) == 0 {
return nil
}
return s.small.StatBlobs(dest, trySmall)
}
Example 15: StatBlobs
func (ds *DiskStorage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
if len(blobs) == 0 {
return nil
}
statSend := func(ref blob.Ref) error {
fi, err := os.Stat(ds.blobPath(ds.partition, ref))
switch {
case err == nil && fi.Mode().IsRegular():
dest <- blob.SizedRef{Ref: ref, Size: fi.Size()}
return nil
case err != nil && !os.IsNotExist(err):
return err
}
return nil
}
if len(blobs) == 1 {
return statSend(blobs[0])
}
errc := make(chan error, len(blobs))
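// Bound the number of concurrent stat goroutines.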
gt := syncutil.NewGate(maxParallelStats)
for _, ref := range blobs {
gt.Start()
go func(ref blob.Ref) {
defer gt.Done()
errc <- statSend(ref)
}(ref)
}
for range blobs {
if err := <-errc; err != nil {
return err
}
}
return nil
}