本文整理汇总了Golang中github.com/jixiuf/go_spider/core/common/page.Page.IsSucc方法的典型用法代码示例。如果您正苦于以下问题:Golang Page.IsSucc方法的具体用法?Golang Page.IsSucc怎么用?Golang Page.IsSucc使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类github.com/jixiuf/go_spider/core/common/page.Page
的用法示例。
在下文中一共展示了Page.IsSucc方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: Process
// Process parses the fetched HTML DOM and records the fields we want to
// keep on the Page (author and project name), and queues follow-up
// repository links for other crawl coroutines.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (this *MyPageProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}

	doc := p.GetHtmlParser()

	// Collect repository links; these urls will be saved and crawled by
	// other coroutines.
	var links []string
	doc.Find("h3[class='repo-list-name'] a").Each(func(_ int, sel *goquery.Selection) {
		href, _ := sel.Attr("href")
		links = append(links, "http://github.com/"+href)
	})
	p.AddTargetRequests(links, "html")

	author := strings.Trim(doc.Find(".entry-title .author").Text(), " \t\n")
	project := strings.Trim(doc.Find(".entry-title .js-current-repository").Text(), " \t\n")
	//readme, _ := doc.Find("#readme").Html()

	// A page without an author name is not worth persisting.
	if author == "" {
		p.SetSkip(true)
	}

	// The entity we want to save by Pipeline.
	p.AddField("author", author)
	p.AddField("project", project)
	//p.AddField("readme", readme)
}
示例2: downloadJson
// downloadJson fetches the request body and decodes it as JSON (after a
// JSONP-to-JSON conversion when the request asks for "jsonp"), attaching
// both the raw body and the parsed result to the page.
func (this *HttpDownloader) downloadJson(p *page.Page, req *request.Request) *page.Page {
	p, raw := this.downloadFile(p, req)
	if !p.IsSucc() {
		return p
	}

	payload := []byte(raw)
	// JSONP responses are stripped down to plain JSON before decoding.
	if req.GetResponceType() == "jsonp" {
		payload = []byte(util.JsonpToJson(raw))
	}

	parsed, err := simplejson.NewJson(payload)
	if err != nil {
		mlog.LogInst().LogError(string(payload) + "\t" + err.Error())
		p.SetStatus(true, err.Error())
		return p
	}

	// json result
	p.SetBodyStr(string(payload)).SetJson(parsed).SetStatus(false, "")
	return p
}
示例3: downloadHtml
// downloadHtml fetches the request body, parses it with goquery and
// attaches both the serialized HTML and the parsed document to the page.
func (this *HttpDownloader) downloadHtml(p *page.Page, req *request.Request) *page.Page {
	p, raw := this.downloadFile(p, req)
	if !p.IsSucc() {
		return p
	}

	doc, err := goquery.NewDocumentFromReader(bytes.NewReader([]byte(raw)))
	if err != nil {
		mlog.LogInst().LogError(err.Error())
		p.SetStatus(true, err.Error())
		return p
	}

	html, err := doc.Html()
	if err != nil {
		mlog.LogInst().LogError(err.Error())
		p.SetStatus(true, err.Error())
		return p
	}

	p.SetBodyStr(html).SetHtmlParser(doc).SetStatus(false, "")
	return p
}
示例4: downloadText
// downloadText fetches the request body and stores it on the page verbatim,
// with no further parsing.
func (this *HttpDownloader) downloadText(p *page.Page, req *request.Request) *page.Page {
	var body string
	if p, body = this.downloadFile(p, req); !p.IsSucc() {
		return p
	}
	p.SetBodyStr(body).SetStatus(false, "")
	return p
}
示例5: Process
// Process handles one fetched page: it restricts the crawl to the
// jiexieyin.org site, extracts every usable <a href> link, resolves
// relative links against the current page URL, and queues the results.
func (this *MyProcessor) Process(p *page.Page) {
	if !p.IsSucc() {
		mlog.LogInst().LogError(p.Errormsg())
		return
	}
	base, err := url.Parse(p.GetRequest().GetUrl())
	if err != nil {
		mlog.LogInst().LogError(err.Error())
		return
	}
	// Only follow links within the target site.
	if !strings.HasSuffix(base.Host, "jiexieyin.org") {
		return
	}
	// Compiled once, not per anchor element (the original compiled three
	// regexes on every iteration of the Each callback).
	reSkip := regexp.MustCompile(`^(javascript:|#|mailto:)`)
	var urls []string
	query := p.GetHtmlParser()
	query.Find("a").Each(func(i int, s *goquery.Selection) {
		href, _ := s.Attr("href")
		// Skip pseudo-links that cannot be fetched.
		if reSkip.MatchString(href) {
			return
		}
		urlHref, err := url.Parse(href)
		if err != nil {
			mlog.LogInst().LogError(err.Error())
			return
		}
		// Resolve relative paths against the page URL per RFC 3986.
		// Plain string concatenation (the previous approach) mangles
		// paths: base "http://h/a/b" + "c.html" -> "http://h/a/bc.html".
		urls = append(urls, base.ResolveReference(urlHref).String())
	})
	p.AddTargetRequests(urls, "html")
}
示例6: Process
// Process parses the fetched page body, queues every URL found in it
// (rewriting weibo.com links to the tw.weibo.com mirror), and records
// each newly seen mail address exactly once.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (this *MyPageProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}

	body := p.GetBodyStr()

	// Queue every discovered link for crawling.
	for _, link := range urlutil.GetAllUrlIn(p.GetRequest().GetUrl(), body) {
		link = strings.Replace(link, "//weibo.com/", "//tw.weibo.com/", -1)
		p.AddTargetRequest(link, "html")
	}

	// Deduplicate mail addresses via the map, then log and push new ones.
	for _, addr := range mailaddrutil.GetAllMailAddrIn(body) {
		if this.mailAddrMap[addr] {
			continue
		}
		this.mailAddrMap[addr] = true
		this.mailLogger.WriteString(addr + "\n")
		this.MailHandle.Push(addr)
	}
}