This article collects typical usage examples of the Go type Group from camlistore.org/pkg/syncutil. If you are wondering what Group is for, how to use it, or what it looks like in real code, the curated examples here may help.
Fifteen code examples of the Group type are shown below, sorted by popularity by default.
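Before the examples, here is a minimal sketch of the pattern they all share, based on the camlistore.org/pkg/syncutil API as it appears below: Group.Go runs a function in its own goroutine and records its error, Group.Err waits for all of them and returns the first non-nil error, and a Gate caps how many goroutines run at once. The doWork function is a hypothetical placeholder.

package main

import (
	"fmt"

	"camlistore.org/pkg/syncutil"
)

// doWork is a hypothetical stand-in for a per-item operation.
func doWork(i int) error {
	fmt.Println("working on", i)
	return nil
}

func main() {
	gate := syncutil.NewGate(5) // allow at most 5 concurrent workers
	var grp syncutil.Group
	for i := 0; i < 20; i++ {
		i := i // capture the loop variable for the closure
		gate.Start()
		grp.Go(func() error {
			defer gate.Done()
			return doWork(i)
		})
	}
	// Err waits for every goroutine and returns the first error, if any.
	if err := grp.Err(); err != nil {
		fmt.Println("error:", err)
	}
}

This Start / Go / defer Done shape recurs in most of the examples that follow.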
Example 1: StatBlobs
func (s *Storage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
	// TODO: use cache
	var grp syncutil.Group
	gate := syncutil.NewGate(20) // arbitrary cap
	for i := range blobs {
		br := blobs[i]
		gate.Start()
		grp.Go(func() error {
			defer gate.Done()
			size, exists, err := s.client.StatObject(
				&googlestorage.Object{Bucket: s.bucket, Key: s.dirPrefix + br.String()})
			if err != nil {
				return err
			}
			if !exists {
				return nil
			}
			if size > constants.MaxBlobSize {
				return fmt.Errorf("blob %s stat size too large (%d)", br, size)
			}
			dest <- blob.SizedRef{Ref: br, Size: uint32(size)}
			return nil
		})
	}
	return grp.Err()
}
Example 2: StatBlobs
func (ds *DiskStorage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
	if len(blobs) == 0 {
		return nil
	}
	statSend := func(ref blob.Ref) error {
		fi, err := os.Stat(ds.blobPath(ds.partition, ref))
		switch {
		case err == nil && fi.Mode().IsRegular():
			dest <- blob.SizedRef{Ref: ref, Size: uint32(fi.Size())}
			return nil
		case err != nil && !os.IsNotExist(err):
			return err
		}
		return nil
	}
	if len(blobs) == 1 {
		return statSend(blobs[0])
	}
	var wg syncutil.Group
	for _, ref := range blobs {
		ref := ref
		statGate.Start()
		wg.Go(func() error {
			defer statGate.Done()
			return statSend(ref)
		})
	}
	return wg.Err()
}
Example 3: StatBlobs
func (sto *s3Storage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) (err error) {
	if faultStat.FailErr(&err) {
		return
	}
	// TODO: use sto.cache
	var wg syncutil.Group
	for _, br := range blobs {
		br := br
		statGate.Start()
		wg.Go(func() error {
			defer statGate.Done()
			size, err := sto.s3Client.Stat(br.String(), sto.bucket)
			if err == nil {
				dest <- blob.SizedRef{Ref: br, Size: uint32(size)}
				return nil
			}
			if err == os.ErrNotExist {
				return nil
			}
			return fmt.Errorf("error statting %v: %v", br, err)
		})
	}
	return wg.Err()
}
Example 4: NotifyBlobReceived
func (h *memHub) NotifyBlobReceived(sb blob.SizedRef) error {
	h.mu.RLock()
	defer h.mu.RUnlock()
	br := sb.Ref
	// Synchronous hooks. If error, prevents notifying other
	// subscribers.
	var grp syncutil.Group
	for i := range h.hooks {
		hook := h.hooks[i]
		grp.Go(func() error { return hook(sb) })
	}
	if err := grp.Err(); err != nil {
		return err
	}
	// Global listeners
	for ch := range h.listeners {
		ch := ch
		go func() { ch <- br }()
	}
	// Blob-specific listeners
	for ch := range h.blobListeners[br] {
		ch := ch
		go func() { ch <- br }()
	}
	return nil
}
Example 5: setFirewall
// setFirewall adds the firewall rules needed for ports 80 & 443 to the default network.
func (d *Deployer) setFirewall(ctx *context.Context, computeService *compute.Service) error {
	defaultNet, err := computeService.Networks.Get(d.Conf.Project, "default").Do()
	if err != nil {
		return fmt.Errorf("error getting default network: %v", err)
	}
	needRules := map[string]compute.Firewall{
		"default-allow-http": compute.Firewall{
			Name:         "default-allow-http",
			SourceRanges: []string{"0.0.0.0/0"},
			SourceTags:   []string{"http-server"},
			Allowed:      []*compute.FirewallAllowed{{"tcp", []string{"80"}}},
			Network:      defaultNet.SelfLink,
		},
		"default-allow-https": compute.Firewall{
			Name:         "default-allow-https",
			SourceRanges: []string{"0.0.0.0/0"},
			SourceTags:   []string{"https-server"},
			Allowed:      []*compute.FirewallAllowed{{"tcp", []string{"443"}}},
			Network:      defaultNet.SelfLink,
		},
	}
	rules, err := computeService.Firewalls.List(d.Conf.Project).Do()
	if err != nil {
		return fmt.Errorf("error listing rules: %v", err)
	}
	for _, it := range rules.Items {
		delete(needRules, it.Name)
	}
	if len(needRules) == 0 {
		return nil
	}
	if Verbose {
		log.Printf("Need to create rules: %v", needRules)
	}
	var wg syncutil.Group
	for name, rule := range needRules {
		if ctx.IsCanceled() {
			return context.ErrCanceled
		}
		name, rule := name, rule
		wg.Go(func() error {
			if Verbose {
				log.Printf("Creating rule %s", name)
			}
			r, err := computeService.Firewalls.Insert(d.Conf.Project, &rule).Do()
			if err != nil {
				return fmt.Errorf("error creating rule %s: %v", name, err)
			}
			if Verbose {
				log.Printf("Created rule %s: %+v", name, r)
			}
			return nil
		})
	}
	return wg.Err()
}
Example 6: testEnumerate
func testEnumerate(t *testing.T, sto blobserver.Storage, wantUnsorted []blob.SizedRef, opts ...interface{}) {
	var after string
	var n = 1000
	for _, opt := range opts {
		switch v := opt.(type) {
		case string:
			after = v
		case int:
			n = v
		default:
			panic("bad option of type " + fmt.Sprintf("%T", v))
		}
	}
	want := append([]blob.SizedRef(nil), wantUnsorted...)
	sort.Sort(blob.SizedByRef(want))
	sbc := make(chan blob.SizedRef, 10)
	var got []blob.SizedRef
	var grp syncutil.Group
	sawEnd := make(chan bool, 1)
	grp.Go(func() error {
		if err := sto.EnumerateBlobs(context.New(), sbc, after, n); err != nil {
			return fmt.Errorf("EnumerateBlobs(%q, %d): %v", after, n, err)
		}
		return nil
	})
	grp.Go(func() error {
		for sb := range sbc {
			if !sb.Valid() {
				return fmt.Errorf("invalid blobref %#v received in enumerate", sb)
			}
			got = append(got, sb)
		}
		sawEnd <- true
		return nil
	})
	grp.Go(func() error {
		select {
		case <-sawEnd:
			return nil
		case <-time.After(10 * time.Second):
			return errors.New("timeout waiting for EnumerateBlobs to close its channel")
		}
	})
	if err := grp.Err(); err != nil {
		t.Fatalf("Enumerate error: %v", err)
		return
	}
	if len(got) == 0 && len(want) == 0 {
		return
	}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("Enumerate mismatch. Got %d; want %d.\n Got: %v\nWant: %v\n",
			len(got), len(want), got, want)
	}
}
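Example 6 reports only the first failure, because grp.Err() returns a single error. When every failure matters, Camlistore's syncutil.Group also provides an Errs accessor returning all accumulated errors (an assumption to verify against your version of the package); a minimal sketch:

func runAll(names []string) {
	var grp syncutil.Group
	for _, name := range names {
		name := name // capture for the closure
		grp.Go(func() error {
			return fmt.Errorf("failed on %s", name)
		})
	}
	// Errs waits like Err, but returns every non-nil error
	// instead of only the first one.
	for _, err := range grp.Errs() {
		log.Println(err)
	}
}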
Example 7: RemoveBlobs
func (sto *s3Storage) RemoveBlobs(blobs []blob.Ref) error {
	var wg syncutil.Group
	for _, blob := range blobs {
		blob := blob
		removeGate.Start()
		wg.Go(func() error {
			defer removeGate.Done()
			return sto.s3Client.Delete(sto.bucket, blob.String())
		})
	}
	return wg.Err()
}
Example 8: projectHasInstance
// projectHasInstance checks all the possible zones to see if the project
// already has an instance. It returns the name of the zone of the first
// instance it finds, if any.
func (d *Deployer) projectHasInstance() (zone string, err error) {
	s, err := compute.New(d.Client)
	if err != nil {
		return "", err
	}
	// TODO(mpl): make use of the handler's cached zones.
	zl, err := compute.NewZonesService(s).List(d.Conf.Project).Do()
	if err != nil {
		return "", fmt.Errorf("could not get a list of zones: %v", err)
	}
	computeService, _ := compute.New(d.Client)
	var zoneOnce sync.Once
	var grp syncutil.Group
	errc := make(chan error, 1)
	zonec := make(chan string, 1)
	timeout := time.NewTimer(30 * time.Second)
	defer timeout.Stop()
	for _, z := range zl.Items {
		z := z
		grp.Go(func() error {
			list, err := computeService.Instances.List(d.Conf.Project, z.Name).Do()
			if err != nil {
				return fmt.Errorf("could not list existing instances: %v", err)
			}
			if len(list.Items) > 0 {
				zoneOnce.Do(func() {
					zonec <- z.Name
				})
			}
			return nil
		})
	}
	go func() {
		errc <- grp.Err()
	}()
	// Block until either an instance is found in some zone, all the instance
	// listing is done, or we time out.
	select {
	case err = <-errc:
		return "", err
	case zone = <-zonec:
		// Voluntarily ignore any listing error if we found at least one instance,
		// because that is what we primarily want to report.
		return zone, nil
	case <-timeout.C:
		return "", errors.New("timed out")
	}
}
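Example 8 layers a first-result-wins select on top of the Group: each worker tries to publish into a 1-buffered channel guarded by sync.Once, while a separate goroutine forwards grp.Err. A condensed sketch of just that race, with a hypothetical check function; the nested select closes a small gap in which a match published just before completion could otherwise be lost:

func firstMatch(items []string, check func(string) (bool, error)) (string, error) {
	var (
		once   sync.Once
		grp    syncutil.Group
		foundc = make(chan string, 1)
		errc   = make(chan error, 1)
	)
	for _, it := range items {
		it := it
		grp.Go(func() error {
			ok, err := check(it)
			if err != nil {
				return err
			}
			if ok {
				once.Do(func() { foundc <- it }) // publish at most once
			}
			return nil
		})
	}
	go func() { errc <- grp.Err() }()
	select {
	case v := <-foundc:
		return v, nil
	case err := <-errc:
		if err != nil {
			return "", err
		}
		// All workers finished cleanly; any publish has already
		// landed in the buffered channel, so check it once more.
		select {
		case v := <-foundc:
			return v, nil
		default:
			return "", nil
		}
	}
}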
Example 9: RemoveBlobs
func (s *Storage) RemoveBlobs(blobs []blob.Ref) error {
	if s.cache != nil {
		s.cache.RemoveBlobs(blobs)
	}
	gate := syncutil.NewGate(50) // arbitrary
	var grp syncutil.Group
	for i := range blobs {
		gate.Start()
		br := blobs[i]
		grp.Go(func() error {
			defer gate.Done()
			return s.client.DeleteObject(&googlestorage.Object{Bucket: s.bucket, Key: s.dirPrefix + br.String()})
		})
	}
	return grp.Err()
}
Example 10: SetAttrs
// SetAttrs sets multiple attributes. The provided keyval should be an even
// number of alternating key/value pairs to set.
func (o *Object) SetAttrs(keyval ...string) error {
	if len(keyval)%2 == 1 {
		panic("importer.SetAttrs: odd argument count")
	}
	g := syncutil.Group{}
	for i := 0; i < len(keyval); i += 2 {
		key, val := keyval[i], keyval[i+1]
		if val != o.Attr(key) {
			g.Go(func() error {
				return o.SetAttr(key, val)
			})
		}
	}
	return g.Err()
}
Example 11: describeReally
func (dr *DescribeRequest) describeReally(br blob.Ref, depth int) {
	mime, size, err := dr.sh.index.GetBlobMIMEType(br)
	if err == os.ErrNotExist {
		return
	}
	if err != nil {
		dr.addError(br, err)
		return
	}
	// TODO: convert all this to DescribedBlob/DescribedPermanode/DescribedFile
	// types instead of JSON maps. Then add JSON marshalers to those types.
	// Add tests.
	des := dr.describedBlob(br)
	des.setMIMEType(mime)
	des.Size = size
	switch des.CamliType {
	case "permanode":
		des.Permanode = new(DescribedPermanode)
		dr.populatePermanodeFields(des.Permanode, br, dr.sh.owner, depth)
	case "file":
		var err error
		des.File, err = dr.sh.index.GetFileInfo(br)
		if err != nil {
			if os.IsNotExist(err) {
				log.Printf("index.GetFileInfo(file %s) failed; index stale?", br)
			} else {
				dr.addError(br, err)
			}
			return
		}
		if des.File.IsImage() {
			des.Image, err = dr.sh.index.GetImageInfo(br)
			if err != nil {
				if os.IsNotExist(err) {
					log.Printf("index.GetImageInfo(file %s) failed; index stale?", br)
				} else {
					dr.addError(br, err)
				}
			}
		}
	case "directory":
		var g syncutil.Group
		g.Go(func() (err error) {
			des.Dir, err = dr.sh.index.GetFileInfo(br)
			if os.IsNotExist(err) {
				log.Printf("index.GetFileInfo(directory %s) failed; index stale?", br)
			}
			return
		})
		g.Go(func() (err error) {
			des.DirChildren, err = dr.getDirMembers(br, depth)
			return
		})
		if err := g.Err(); err != nil {
			dr.addError(br, err)
		}
	}
}
Example 12: markItem
// ctx will be canceled on failure
func (c *Collector) markItem(ctx *context.Context, it Item, isRoot bool) error {
	if !isRoot {
		marked, err := c.Marker.IsMarked(it)
		if err != nil {
			return err
		}
		if marked {
			return nil
		}
	}
	if err := c.Marker.Mark(it); err != nil {
		return err
	}
	ch := make(chan Item, buffered)
	var grp syncutil.Group
	grp.Go(func() error {
		return c.ItemEnumerator.EnumerateItem(ctx, it, ch)
	})
	grp.Go(func() error {
		for it := range ch {
			if err := c.markItem(ctx, it, false); err != nil {
				return err
			}
		}
		return nil
	})
	if err := grp.Err(); err != nil {
		ctx.Cancel()
		return err
	}
	return nil
}
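Example 12 pairs a producer goroutine (the enumerator filling ch) with a consumer, using the Group to join both and surface the first error. A minimal standalone sketch of the same shape, with hypothetical produce and consume helpers: the producer closes the channel so the consumer's range loop terminates, and the consumer keeps draining after a failure so the producer can never block on a full channel.

func processAll(produce func(chan<- int) error, consume func(int) error) error {
	ch := make(chan int, 16)
	var grp syncutil.Group
	grp.Go(func() error {
		defer close(ch) // unblock the consumer's range loop even on error
		return produce(ch)
	})
	grp.Go(func() error {
		var firstErr error
		for v := range ch {
			if firstErr == nil {
				firstErr = consume(v)
			}
			// Keep draining after a failure so the producer
			// never blocks on a full channel.
		}
		return firstErr
	})
	return grp.Err()
}

Example 12 instead cancels its context on failure, which serves the same purpose of stopping the enumerator early.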
Example 13: RemoveBlobs
func (m *mongoStorage) RemoveBlobs(blobs []blob.Ref) error {
	var wg syncutil.Group
	for _, blob := range blobs {
		blob := blob
		removeGate.Start()
		wg.Go(func() error {
			defer removeGate.Done()
			err := m.c.Remove(bson.M{"key": blob.String()})
			if err == mgo.ErrNotFound {
				return nil
			}
			return err
		})
	}
	return wg.Err()
}
Example 14: Rename
// &RenameRequest{Header:fuse.Header{Conn:(*fuse.Conn)(0xc210048180), ID:0x2, Node:0x8, Uid:0xf0d4, Gid:0x1388, Pid:0x5edb}, NewDir:0x8, OldName:"1", NewName:"2"}
func (n *mutDir) Rename(req *fuse.RenameRequest, newDir fuse.Node, intr fuse.Intr) fuse.Error {
	n2, ok := newDir.(*mutDir)
	if !ok {
		log.Printf("*mutDir newDir node isn't a *mutDir; is a %T; can't handle. returning EIO.", newDir)
		return fuse.EIO
	}
	var wg syncutil.Group
	wg.Go(n.populate)
	wg.Go(n2.populate)
	if err := wg.Err(); err != nil {
		log.Printf("*mutDir.Rename src dir populate = %v", err)
		return fuse.EIO
	}
	n.mu.Lock()
	target, ok := n.children[req.OldName]
	n.mu.Unlock()
	if !ok {
		log.Printf("*mutDir.Rename src name %q isn't known", req.OldName)
		return fuse.ENOENT
	}
	now := time.Now()
	// Add a camliPath:name attribute to the dest permanode before unlinking it from
	// the source.
	claim := schema.NewSetAttributeClaim(n2.permanode, "camliPath:"+req.NewName, target.permanodeString())
	claim.SetClaimDate(now)
	_, err := n.fs.client.UploadAndSignBlob(claim)
	if err != nil {
		log.Printf("Upload rename link error: %v", err)
		return fuse.EIO
	}
	delClaim := schema.NewDelAttributeClaim(n.permanode, "camliPath:"+req.OldName, "")
	delClaim.SetClaimDate(now)
	_, err = n.fs.client.UploadAndSignBlob(delClaim)
	if err != nil {
		log.Printf("Upload rename src unlink error: %v", err)
		return fuse.EIO
	}
	// TODO(bradfitz): this locking would be racy, if the kernel
	// doesn't do it properly. (It should.) Let's just trust the
	// kernel for now. Later we can verify and remove this
	// comment.
	n.mu.Lock()
	if n.children[req.OldName] != target {
		panic("Race.")
	}
	delete(n.children, req.OldName)
	n.mu.Unlock()
	n2.mu.Lock()
	n2.children[req.NewName] = target
	n2.mu.Unlock()
	return nil
}
Example 15: Close
func (up *Uploader) Close() error {
	var grp syncutil.Group
	if up.haveCache != nil {
		grp.Go(up.haveCache.Close)
	}
	grp.Go(up.Client.Close)
	return grp.Err()
}
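A closing note on Example 15: because a Close method already has the func() error signature that Group.Go expects, the method value can be passed directly with no wrapper closure. A small sketch with a hypothetical resource type:

type resource struct{ name string }

func (r *resource) Close() error {
	fmt.Println("closing", r.name)
	return nil
}

func closeBoth(a, b *resource) error {
	var grp syncutil.Group
	// Method values like a.Close already satisfy func() error,
	// so they can be handed to Go directly.
	grp.Go(a.Close)
	grp.Go(b.Close)
	return grp.Err()
}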