This page collects typical usage examples of the Go method FsPath.Join from github.com/daviddengcn/sophie. If you are wondering what FsPath.Join does and how to use it, the curated examples below may help. You can also explore further usage examples of the type the method belongs to, github.com/daviddengcn/sophie.FsPath.
Two code examples of the FsPath.Join method are shown below, sorted by popularity by default.
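Before the full examples, here is a minimal sketch of the pattern they share, based only on the calls visible below: an FsPath pairs a file system (such as sophie.LocalFS) with a path string, and Join appends a sub-path, returning a new FsPath on the same file system. The directory names "data" and "docs" are hypothetical and used only for illustration, and the sketch assumes Join returns an FsPath value with an accessible Path field, as the examples below suggest.

package main

import (
	"log"

	"github.com/daviddengcn/sophie"
)

func main() {
	// Build an FsPath rooted at the local file system.
	// "data" is a hypothetical directory used only for illustration.
	root := sophie.FsPath{
		Fs:   sophie.LocalFS,
		Path: "data",
	}
	// Join appends a sub-path and yields a new FsPath on the same file system.
	docs := root.Join("docs")
	log.Printf("joined path: %v", docs.Path)
}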
Example 1: main
func main() {
	// Clean up the temporary gddo directory when the crawler exits.
	defer func() {
		tmpFn := villa.Path("/tmp/gddo")
		if err := tmpFn.RemoveAll(); err != nil {
			log.Printf("Delete %v failed: %v", tmpFn, err)
		}
	}()

	singlePackage := ""
	singleETag := ""
	flag.StringVar(&singlePackage, "pkg", singlePackage, "Crawling single package")
	flag.StringVar(&singleETag, "etag", singleETag, "ETag for single package crawling")
	flag.Parse()

	httpClient := gcse.GenHttpClient("")

	// Single-package mode: crawl one package and exit.
	if singlePackage != "" {
		log.Printf("Crawling single package %s ...", singlePackage)
		p, err := gcse.CrawlPackage(httpClient, singlePackage, singleETag)
		if err != nil {
			fmtp.Printfln("Crawling package %s failed: %v", singlePackage, err)
		} else {
			fmtp.Printfln("Package %s: %+v", singlePackage, p)
		}
		return
	}

	log.Println("crawler started...")

	// Load CrawlerDB
	cDB = gcse.LoadCrawlerDB()

	fpDataRoot := sophie.FsPath{
		Fs:   sophie.LocalFS,
		Path: gcse.DataRoot.S(),
	}

	fpDocs := fpDataRoot.Join(gcse.FnDocs)
	if err := loadAllDocsPkgs(kv.DirInput(fpDocs)); err != nil {
		log.Fatalf("loadAllDocsPkgs: %v", err)
	}
	log.Printf("%d docs loaded!", len(allDocsPkgs))

	AppStopTime = time.Now().Add(gcse.CrawlerDuePerRun)

	//pathToCrawl := gcse.DataRoot.Join(gcse.FnToCrawl)
	fpCrawler := fpDataRoot.Join(gcse.FnCrawlerDB)
	fpToCrawl := fpDataRoot.Join(gcse.FnToCrawl)

	fpNewDocs := fpCrawler.Join(gcse.FnNewDocs)
	fpNewDocs.Remove()

	// Crawl packages and persons concurrently; each goroutine reports on its own channel.
	pkgEnd := make(chan error, 1)
	go crawlPackages(httpClient, fpToCrawl.Join(gcse.FnPackage), fpNewDocs, pkgEnd)

	psnEnd := make(chan error, 1)
	go crawlPersons(httpClient, fpToCrawl.Join(gcse.FnPerson), psnEnd)

	errPkg, errPsn := <-pkgEnd, <-psnEnd
	if errPkg != nil || errPsn != nil {
		log.Fatalf("Some job may have failed, package: %v, person: %v", errPkg, errPsn)
	}

	if err := processImports(); err != nil {
		log.Printf("processImports failed: %v", err)
	}

	syncDatabases()
	log.Println("crawler stopped...")
}
Example 2: main
func main() {
	runtime.GOMAXPROCS(2)

	log.Printf("Using personal: %v", configs.CrawlerGithubPersonal)
	gcse.GithubSpider = github.NewSpiderWithToken(configs.CrawlerGithubPersonal)

	// Use a bolt-backed file cache for the GitHub spider if the cache file can be opened.
	if db, err := bh.Open(configs.DataRoot.Join("filecache.bolt").S(), 0644, nil); err == nil {
		log.Print("Using file cache!")
		gcse.GithubSpider.FileCache = spider.BoltFileCache{
			DB:         db,
			IncCounter: bi.Inc,
		}
	} else {
		log.Printf("Open file cache failed: %v", err)
	}

	cleanTempDir()
	defer cleanTempDir()

	singlePackage := flag.String("pkg", "", "Crawling a single package")
	singleETag := flag.String("etag", "", "ETag for the single package crawling")
	singlePerson := flag.String("person", "", "Crawling a single person")
	flag.Parse()

	httpClient := gcse.GenHttpClient("")

	if *singlePerson != "" {
		log.Printf("Crawling single person %s ...", *singlePerson)
		p, err := gcse.CrawlPerson(httpClient, *singlePerson)
		if err != nil {
			fmtp.Printfln("Crawling person %s failed: %v", *singlePerson, err)
		} else {
			fmtp.Printfln("Person %s: %+v", *singlePerson, p)
		}
	}
	if *singlePackage != "" {
		log.Printf("Crawling single package %s ...", *singlePackage)
		p, flds, err := gcse.CrawlPackage(httpClient, *singlePackage, *singleETag)
		if err != nil {
			fmtp.Printfln("Crawling package %s failed: %v, folders: %v", *singlePackage, err, flds)
		} else {
			fmtp.Printfln("Package %s: %+v, folders: %v", *singlePackage, p, flds)
		}
	}
	// In single-package or single-person mode, stop after the one-off crawl.
	if *singlePackage != "" || *singlePerson != "" {
		return
	}

	log.Println("crawler started...")

	// Load CrawlerDB
	cDB = gcse.LoadCrawlerDB()

	fpDataRoot := sophie.FsPath{
		Fs:   sophie.LocalFS,
		Path: configs.DataRoot.S(),
	}

	fpDocs := fpDataRoot.Join(configs.FnDocs)
	if err := loadAllDocsPkgs(kv.DirInput(fpDocs)); err != nil {
		log.Fatalf("loadAllDocsPkgs: %v", err)
	}
	log.Printf("%d docs loaded!", len(allDocsPkgs))

	AppStopTime = time.Now().Add(configs.CrawlerDuePerRun)

	//pathToCrawl := gcse.DataRoot.Join(gcse.FnToCrawl)
	fpCrawler := fpDataRoot.Join(configs.FnCrawlerDB)
	fpToCrawl := fpDataRoot.Join(configs.FnToCrawl)

	fpNewDocs := fpCrawler.Join(configs.FnNewDocs)
	fpNewDocs.Remove()

	if err := processImports(); err != nil {
		log.Printf("processImports failed: %v", err)
	}

	// Crawl packages and persons concurrently; each goroutine reports on its own channel.
	pkgEnd := make(chan error, 1)
	go crawlPackages(httpClient, fpToCrawl.Join(configs.FnPackage), fpNewDocs, pkgEnd)

	psnEnd := make(chan error, 1)
	go crawlPersons(httpClient, fpToCrawl.Join(configs.FnPerson), psnEnd)

	errPkg, errPsn := <-pkgEnd, <-psnEnd

	bi.Flush()
	bi.Process()
	syncDatabases()

	if errPkg != nil || errPsn != nil {
		log.Fatalf("Some job may have failed, package: %v, person: %v", errPkg, errPsn)
	}
	log.Println("crawler stopped...")
}