本文整理汇总了Golang中github.com/rogpeppe/rog-go/parallel.NewRun函数的典型用法代码示例。如果您正苦于以下问题:Golang NewRun函数的具体用法?Golang NewRun怎么用?Golang NewRun使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewRun函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: RemoveAll
// RemoveAll removes a tree recursively. Files are removed in parallel;
// directories are removed sequentially (deepest first) only after all
// files are gone, since a directory can be removed only once it is empty.
func RemoveAll(path string, vfs rwvfs.WalkableFileSystem) error {
	w := fs.WalkFS(path, vfs)
	var dirs []string // remove dirs after removing all files
	filesPar := parallel.NewRun(20)
	for w.Step() {
		if err := w.Err(); err != nil {
			return err
		}
		if w.Stat().IsDir() {
			dirs = append(dirs, w.Path())
		} else {
			p := w.Path() // capture for the closure below
			filesPar.Do(func() error { return vfs.Remove(p) })
		}
	}
	if err := filesPar.Wait(); err != nil {
		return err
	}
	// Remove directories sequentially, leaf dirs first. Removing them
	// through a parallel.Run (as files are above) would race: the
	// reverse-sorted submission order is not an execution order, so a
	// parent's removal could be attempted before its children are gone.
	sort.Sort(sort.Reverse(sort.StringSlice(dirs))) // reverse so we delete leaf dirs first
	for _, dir := range dirs {
		if err := vfs.Remove(dir); err != nil {
			return err
		}
	}
	return nil
}
示例2: ScanMulti
// ScanMulti runs multiple scanner tools in parallel. It passes command-line
// options from opt to each one, and it sends the JSON representation of cfg
// (the repo/tree's Config) to each tool's stdin.
//
// An error is returned only when no scanner produced any units;
// otherwise partial results are kept.
func ScanMulti(scanners [][]string, opt Options, treeConfig map[string]interface{}) ([]*unit.SourceUnit, error) {
	if treeConfig == nil {
		treeConfig = map[string]interface{}{}
	}
	var mu sync.Mutex
	var units []*unit.SourceUnit
	run := parallel.NewRun(runtime.GOMAXPROCS(0))
	for _, sc := range scanners {
		sc := sc // capture loop variable for the closure
		run.Do(func() error {
			found, err := Scan(sc, opt, treeConfig)
			if err != nil {
				return fmt.Errorf("scanner %v: %s", sc, err)
			}
			// Append under the lock; scanners run concurrently.
			mu.Lock()
			units = append(units, found...)
			mu.Unlock()
			return nil
		})
	}
	err := run.Wait()
	// Return error only if none of the commands succeeded.
	if len(units) == 0 {
		return nil, err
	}
	return units, nil
}
示例3: Defs
// Defs aggregates defs from every repo store, tagging each def with
// the repo it came from. Per-repo fetches run in parallel; stores that
// don't exist are treated as empty.
func (s repoStores) Defs(f ...DefFilter) ([]*graph.Def, error) {
	rss, err := openRepoStores(s.opener, f)
	if err != nil {
		return nil, err
	}
	var (
		mu       sync.Mutex
		combined []*graph.Def
	)
	par := parallel.NewRun(storeFetchPar)
	for repoKey, repoStore := range rss {
		if repoStore == nil {
			continue
		}
		repo, rs := repoKey, repoStore // capture loop variables
		par.Do(func() error {
			defs, err := rs.Defs(filtersForRepo(repo, f).([]DefFilter)...)
			if err != nil && !isStoreNotExist(err) {
				return err
			}
			// Stamp each def with its originating repo.
			for _, d := range defs {
				d.Repo = repo
			}
			mu.Lock()
			combined = append(combined, defs...)
			mu.Unlock()
			return nil
		})
	}
	err = par.Wait()
	return combined, err
}
示例4: Units
// Units aggregates source units from every repo store, tagging each
// unit with the repo it came from. Per-repo fetches run in parallel;
// stores that don't exist are treated as empty.
func (s repoStores) Units(f ...UnitFilter) ([]*unit.SourceUnit, error) {
	rss, err := openRepoStores(s.opener, f)
	if err != nil {
		return nil, err
	}
	var (
		mu       sync.Mutex
		combined []*unit.SourceUnit
	)
	par := parallel.NewRun(storeFetchPar)
	for repoKey, repoStore := range rss {
		if repoStore == nil {
			continue
		}
		repo, rs := repoKey, repoStore // capture loop variables
		par.Do(func() error {
			found, err := rs.Units(filtersForRepo(repo, f).([]UnitFilter)...)
			if err != nil && !isStoreNotExist(err) {
				return err
			}
			// Stamp each unit with its originating repo.
			for _, u := range found {
				u.Repo = repo
			}
			mu.Lock()
			combined = append(combined, found...)
			mu.Unlock()
			return nil
		})
	}
	err = par.Wait()
	return combined, err
}
示例5: Defs
// Defs aggregates defs from every unit store, stamping each def with
// the unit (type and name) it came from. Per-unit fetches run in
// parallel; stores that don't exist are treated as empty.
func (s unitStores) Defs(fs ...DefFilter) ([]*graph.Def, error) {
	uss, err := openUnitStores(s.opener, fs)
	if err != nil {
		return nil, err
	}
	var (
		mu       sync.Mutex
		combined []*graph.Def
	)
	par := parallel.NewRun(storeFetchPar)
	for unitKey, unitStore := range uss {
		if unitStore == nil {
			continue
		}
		u, us := unitKey, unitStore // capture loop variables
		par.Do(func() error {
			defs, err := us.Defs(filtersForUnit(u, fs).([]DefFilter)...)
			if err != nil && !isStoreNotExist(err) {
				return err
			}
			// Stamp each def with its originating unit.
			for _, d := range defs {
				d.UnitType = u.Type
				d.Unit = u.Name
			}
			mu.Lock()
			combined = append(combined, defs...)
			mu.Unlock()
			return nil
		})
	}
	err = par.Wait()
	return combined, err
}
示例6: refsAtOffsets
// refsAtOffsets reads the refs at the given serialized byte offsets
// from the ref data file and returns them in arbitrary order.
//
// The named return values let the deferred Close below surface a
// close error even when the body itself succeeded.
func (s *fsUnitStore) refsAtOffsets(ofs byteOffsets, fs []RefFilter) (refs []*graph.Ref, err error) {
	vlog.Printf("%s: reading refs at %d offsets with filters %v...", s, len(ofs), fs)
	f, err := openFetcherOrOpen(s.fs, unitRefsFilename)
	if err != nil {
		return nil, err
	}
	defer func() {
		// Propagate a Close error only if no earlier error occurred.
		err2 := f.Close()
		if err == nil {
			err = err2
		}
	}()
	ffs := refFilters(fs)
	p := parFetches(s.fs, fs)
	// p == 0 means the filters want no more results (e.g., an
	// exhausted limit), so skip fetching entirely.
	if p == 0 {
		return nil, nil
	}
	var refsLock sync.Mutex
	par := parallel.NewRun(p)
	for _, ofs_ := range ofs {
		ofs := ofs_ // capture loop variable for the closure below
		par.Do(func() error {
			// Stop early if a limit filter says no more refs are needed.
			if _, moreOK := LimitRemaining(fs); !moreOK {
				return nil
			}
			// Guess how many bytes this ref is. The s3vfs (if that's the
			// VFS impl in use) will autofetch beyond that if needed.
			const byteEstimate = decodeBufSize
			r, err := rangeReader(s.fs, unitRefsFilename, f, ofs, byteEstimate)
			if err != nil {
				return err
			}
			dec := Codec.NewDecoder(r)
			var ref graph.Ref
			if _, err := dec.Decode(&ref); err != nil {
				return err
			}
			// Keep the ref only if it passes all filters; guard the
			// shared result slice since decodes run concurrently.
			if ffs.SelectRef(&ref) {
				refsLock.Lock()
				refs = append(refs, &ref)
				refsLock.Unlock()
			}
			return nil
		})
	}
	if err := par.Wait(); err != nil {
		return refs, err
	}
	// Concurrent decoding produced arbitrary order; restore the
	// deterministic file/start/end ordering callers expect.
	sort.Sort(refsByFileStartEnd(refs))
	vlog.Printf("%s: read %v refs at %d offsets with filters %v.", s, len(refs), len(ofs), fs)
	return refs, nil
}
示例7: BuildIndexes
// BuildIndexes builds all indexes on store and its lower-level stores
// that match the specified criteria. It returns the status of each
// index that was built (or rebuilt).
//
// Indexes are streamed from listIndexes into a builder goroutine,
// which runs builds in parallel batches; a batch boundary is inserted
// whenever the DependsOnChildren flag flips, so child-dependent
// indexes only start after everything before them has finished.
func BuildIndexes(store interface{}, c IndexCriteria, indexChan chan<- IndexStatus) ([]IndexStatus, error) {
	var built []IndexStatus
	var builtMu sync.Mutex
	indexChan2 := make(chan IndexStatus)
	done := make(chan struct{})
	go func() {
		var par *parallel.Run
		lastDependsOnChildren := false
		for sx := range indexChan2 {
			// doBuild builds one index, records its status (with
			// timing and any build error), and forwards the status
			// to indexChan if the caller provided one.
			doBuild := func(sx IndexStatus) {
				start := time.Now()
				err := sx.store.BuildIndex(sx.Name, sx.index)
				sx.BuildDuration = time.Since(start)
				if err == nil {
					sx.Stale = false
				} else {
					sx.BuildError = err.Error()
				}
				builtMu.Lock()
				built = append(built, sx)
				builtMu.Unlock()
				if indexChan != nil {
					indexChan <- sx
				}
			}
			// Run indexes in parallel, but if we
			// encounter an index that depends on children, wait for
			// all previously seen indexes to finish before building
			// those indexes.
			if sx.DependsOnChildren != lastDependsOnChildren && par != nil {
				par.Wait()
				par = nil
			}
			if par == nil {
				par = parallel.NewRun(MaxIndexParallel)
			}
			sx_ := sx // capture for the closure below
			par.Do(func() error { doBuild(sx_); return nil })
			lastDependsOnChildren = sx.DependsOnChildren
		}
		// Drain the final batch before signaling completion.
		if par != nil {
			par.Wait()
		}
		done <- struct{}{}
	}()
	// listIndexes feeds matching indexes into indexChan2; closing the
	// channel lets the builder goroutine finish its range loop.
	err := listIndexes(store, c, indexChan2, nil)
	close(indexChan2)
	<-done
	return built, err
}
示例8: Refs
// Refs aggregates refs from every unit store, stamping each ref with
// its source unit and defaulting the target unit fields to the same
// unit when unset. Per-unit fetches run in parallel.
func (s unitStores) Refs(f ...RefFilter) ([]*graph.Ref, error) {
	uss, err := openUnitStores(s.opener, f)
	if err != nil {
		return nil, err
	}
	c_unitStores_Refs_last_numUnitsQueried.set(0)
	var (
		mu       sync.Mutex
		combined []*graph.Ref
	)
	par := parallel.NewRun(storeFetchPar)
	for unitKey, unitStore := range uss {
		if unitStore == nil {
			continue
		}
		u, us := unitKey, unitStore // capture loop variables
		c_unitStores_Refs_last_numUnitsQueried.increment()
		par.Do(func() error {
			// Stop early if a limit filter is already satisfied.
			if _, moreOK := LimitRemaining(f); !moreOK {
				return nil
			}
			fCopy := withImpliedUnit(filtersForUnit(u, f).([]RefFilter), u)
			refs, err := us.Refs(fCopy...)
			if err != nil && !isStoreNotExist(err) {
				return err
			}
			for _, ref := range refs {
				ref.UnitType = u.Type
				ref.Unit = u.Name
				if ref.DefUnitType == "" {
					ref.DefUnitType = u.Type
				}
				if ref.DefUnit == "" {
					ref.DefUnit = u.Name
				}
			}
			mu.Lock()
			combined = append(combined, refs...)
			mu.Unlock()
			return nil
		})
	}
	err = par.Wait()
	return combined, err
}
示例9: ReadCached
// ReadCached reads a Tree's configuration from all of its source unit
// definition files (which may either be in a local VFS rooted at a
// .srclib-cache/<COMMITID> dir, or a remote VFS). It does not read
// the Srcfile; the Srcfile's directives are already accounted for in
// the cached source unit definition files.
//
// bdfs should be a VFS obtained from a call to
// (buildstore.RepoBuildStore).Commit.
func ReadCached(bdfs vfs.FileSystem) (*Tree, error) {
if _, err := bdfs.Lstat("."); os.IsNotExist(err) {
return nil, fmt.Errorf("build cache dir does not exist (did you run `src config` to create it)?")
} else if err != nil {
return nil, err
}
// Collect all **/*.unit.json files.
var unitFiles []string
unitSuffix := buildstore.DataTypeSuffix(unit.SourceUnit{})
w := fs.WalkFS(".", rwvfs.Walkable(rwvfs.ReadOnly(bdfs)))
for w.Step() {
if err := w.Err(); err != nil {
return nil, err
}
if path := w.Path(); strings.HasSuffix(path, unitSuffix) {
unitFiles = append(unitFiles, path)
}
}
// Parse units
sort.Strings(unitFiles)
units := make([]*unit.SourceUnit, len(unitFiles))
par := parallel.NewRun(runtime.GOMAXPROCS(0))
for i_, unitFile_ := range unitFiles {
i, unitFile := i_, unitFile_
par.Do(func() error {
f, err := bdfs.Open(unitFile)
if err != nil {
return err
}
if err := json.NewDecoder(f).Decode(&units[i]); err != nil {
f.Close()
return err
}
if err := f.Close(); err != nil {
return err
}
return nil
})
}
if err := par.Wait(); err != nil {
return nil, err
}
return &Tree{SourceUnits: units}, nil
}
示例10: brokenRefsOnly
// brokenRefsOnly filters refs down to those whose target def cannot be
// found in s. Cross-repo refs are skipped (with a one-time warning per
// def repo) because cross-repo resolution is not yet implemented.
func brokenRefsOnly(refs []*graph.Ref, s interface{}) ([]*graph.Ref, error) {
	uniqRefDefs := make(map[graph.DefKey][]*graph.Ref)
	loggedDefRepos := make(map[string]struct{})
	for _, ref := range refs {
		if ref.Repo != ref.DefRepo {
			if _, logged := loggedDefRepos[ref.DefRepo]; !logged {
				// TODO(sqs): need to skip these because we don't know the
				// "DefCommitID" in the def's repo, and ByDefKey requires
				// the key to have a CommitID.
				log.Printf("WARNING: Can't check resolution of cross-repo ref (ref.Repo=%q != ref.DefRepo=%q) - cross-repo ref checking is not yet implemented. (This log message will not be repeated.)", ref.Repo, ref.DefRepo)
				loggedDefRepos[ref.DefRepo] = struct{}{}
			}
			continue
		}
		// Group refs by the def they point at so each def is looked
		// up only once.
		key := ref.DefKey()
		key.CommitID = ref.CommitID
		uniqRefDefs[key] = append(uniqRefDefs[key], ref)
	}
	var (
		mu     sync.Mutex
		broken []*graph.Ref
	)
	par := parallel.NewRun(runtime.GOMAXPROCS(0))
	for defKey, defRefs := range uniqRefDefs {
		defKey, defRefs := defKey, defRefs // capture loop variables
		par.Do(func() error {
			defs, err := s.(store.RepoStore).Defs(store.ByDefKey(defKey))
			if err != nil {
				return err
			}
			if len(defs) == 0 {
				// The def does not exist: every ref to it is broken.
				mu.Lock()
				broken = append(broken, defRefs...)
				mu.Unlock()
			}
			return nil
		})
	}
	err := par.Wait()
	sort.Sort(graph.Refs(broken))
	return broken, err
}
示例11: TestParallelMaxPar
// TestParallelMaxPar checks that Run never executes more than maxPar
// functions at once, that all queued functions run exactly once, and
// that Wait returns nil when every function succeeds.
func TestParallelMaxPar(t *testing.T) {
	const (
		totalDo = 10
		maxPar  = 3
	)
	var mu sync.Mutex
	max := 0 // highest concurrency observed
	n := 0   // functions currently running
	tot := 0 // total functions executed
	r := parallel.NewRun(maxPar)
	for i := 0; i < totalDo; i++ {
		r.Do(func() error {
			mu.Lock()
			tot++
			n++
			if n > max {
				max = n
			}
			mu.Unlock()
			// Sleep long enough for all maxPar slots to fill up.
			// (Was time.Sleep(0.1e9): a bare numeric constant as a
			// Duration; the explicit unit form is equivalent and clear.)
			time.Sleep(100 * time.Millisecond)
			mu.Lock()
			n--
			mu.Unlock()
			return nil
		})
	}
	err := r.Wait()
	if n != 0 {
		t.Errorf("%d functions still running", n)
	}
	if tot != totalDo {
		t.Errorf("all functions not executed; want %d got %d", totalDo, tot)
	}
	if err != nil {
		t.Errorf("wrong error; want nil got %v", err)
	}
	if max != maxPar {
		t.Errorf("wrong number of do's ran at once; want %d got %d", maxPar, max)
	}
}
示例12: OpenRepo
// OpenRepo inspects dir and returns repository metadata: VCS type,
// root directory, current commit ID, and clone URL. If no git/hg
// repository is detected, a permissive placeholder Repo is returned
// rather than an error.
func OpenRepo(dir string) (*Repo, error) {
	if fi, err := os.Stat(dir); err != nil || !fi.Mode().IsDir() {
		return nil, fmt.Errorf("not a directory: %q", dir)
	}
	rc := &Repo{}
	// Detect the VCS type and repository root.
	var err error
	rc.RootDir, rc.VCSType, err = getRootDir(dir)
	if err != nil || rc.RootDir == "" {
		log.Printf("Failed to detect git/hg repository root dir for %q; continuing.", dir)
		// Be permissive and return a repo even if there is no git/hg repository.
		wd, wdErr := os.Getwd()
		if wdErr != nil {
			return nil, wdErr
		}
		rc.RootDir = wd
		// TODO: Ensure that builds without commit ids are successful.
		rc.CommitID = "ffffffffffffffffffffffffffffffffffffffff"
		rc.VCSType = ""
		rc.CloneURL = ""
		return rc, nil
	}
	// Resolve the commit ID and clone URL concurrently.
	par := parallel.NewRun(4)
	par.Do(func() error {
		// Current commit ID
		var resolveErr error
		rc.CommitID, resolveErr = resolveWorkingTreeRevision(rc.VCSType, rc.RootDir)
		return resolveErr
	})
	par.Do(func() error {
		// Get repo URI from clone URL.
		rc.CloneURL = getVCSCloneURL(rc.VCSType, rc.RootDir)
		return nil
	})
	return rc, par.Wait()
}
示例13: Execute
// Execute uploads all regular build-data files from the local build
// store to the remote one, walking the local tree and uploading up to
// 8 files concurrently.
func (c *BuildDataUploadCmd) Execute(args []string) error {
	localFS, localRepoLabel, err := c.getLocalFileSystem()
	if err != nil {
		return err
	}
	remoteFS, remoteRepoLabel, _, err := c.getRemoteFileSystem()
	if err != nil {
		return err
	}
	if GlobalOpt.Verbose {
		log.Printf("Uploading build files from %s to %s...", localRepoLabel, remoteRepoLabel)
	}
	// TODO(sqs): check if file exists remotely and don't upload it if it does and if it is identical
	par := parallel.NewRun(8)
	walker := fs.WalkFS(".", rwvfs.Walkable(localFS))
	for walker.Step() {
		if err := walker.Err(); err != nil {
			return err
		}
		// Skip anything that is not a regular file (dirs, symlinks,
		// or entries with no stat info).
		info := walker.Stat()
		if info == nil || !info.Mode().IsRegular() {
			continue
		}
		p, fi := walker.Path(), info // capture for the closure below
		par.Do(func() error {
			return uploadFile(localFS, remoteFS, p, fi, c.DryRun)
		})
	}
	return par.Wait()
}
示例14: TestParallelError
// TestParallelError verifies that Wait reports one error per failed
// function and that the reported error values are exactly those
// returned by the failing functions (indices errDo..totalDo-1).
func TestParallelError(t *testing.T) {
	const (
		totalDo = 10
		errDo   = 5
	)
	r := parallel.NewRun(6)
	for i := 0; i < totalDo; i++ {
		i := i // capture loop variable for the closures
		if i >= errDo {
			r.Do(func() error {
				return intError(i)
			})
		} else {
			r.Do(func() error {
				return nil
			})
		}
	}
	err := r.Wait()
	if err == nil {
		t.Fatalf("expected error, got none")
	}
	errs := err.(parallel.Errors)
	if len(errs) != totalDo-errDo {
		// Fixed: the want/got arguments were previously swapped
		// (the expected count was printed as "got").
		t.Fatalf("wrong error count; want %d got %d", totalDo-errDo, len(errs))
	}
	// Errors arrive in arbitrary order; sort the embedded ints before
	// comparing against the expected sequence.
	ints := make([]int, len(errs))
	for i, err := range errs {
		ints[i] = int(err.(intError))
	}
	sort.Ints(ints)
	for i, n := range ints {
		if n != i+errDo {
			t.Errorf("unexpected error value; want %d got %d", i+errDo, n)
		}
	}
}
示例15: Import
// Import calls to the underlying fsUnitStore to write the def
// and ref data files. It also builds and writes the indexes.
func (s *indexedUnitStore) Import(data graph.Output) error {
	cleanForImport(&data, "", "", "")
	var (
		defOfs  byteOffsets
		refOfs  byteOffsets
		refFBRs fileByteRanges
	)
	// Write defs and refs concurrently; both must complete before the
	// indexes (which reference their byte offsets) can be built.
	par := parallel.NewRun(2)
	par.Do(func() (err error) {
		defOfs, err = s.fsUnitStore.writeDefs(data.Defs)
		return err
	})
	par.Do(func() (err error) {
		refFBRs, refOfs, err = s.fsUnitStore.writeRefs(data.Refs)
		return err
	})
	if err := par.Wait(); err != nil {
		return err
	}
	return s.buildIndexes(s.Indexes(), &data, defOfs, refFBRs, refOfs)
}