This article collects typical usage examples of the Golang method Page.Errormsg from github.com/jixiuf/go_spider/core/common/page. If you are unsure what Page.Errormsg does or how to call it, the selected code examples below may help; you can also look further into the enclosing type, github.com/jixiuf/go_spider/core/common/page.Page, for more context.
Three code examples of the Page.Errormsg method are shown below, sorted by popularity by default.
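All three examples use Errormsg the same way: inside a PageProcesser's Process method they first call IsSucc(), and if the download failed they log the string returned by Errormsg() and return early. The sketch below distills that shared pattern; the package name, the MinimalProcesser type, and the use of the standard log package are illustrative choices, not part of go_spider.

// Minimal sketch of the IsSucc / Errormsg error-handling pattern shared by the
// examples below. MinimalProcesser is hypothetical and exists only to host Process.
package sketch

import (
	"log"

	"github.com/jixiuf/go_spider/core/common/page"
)

type MinimalProcesser struct{}

func (this *MinimalProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		// Errormsg carries the reason the download failed.
		log.Println(p.Errormsg())
		return
	}
	// On success, parse the page, e.g. via p.GetHtmlParser() (goquery document)
	// or p.GetBodyStr() (raw body), as the examples below do.
}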
Example 1: Process
// Parse the HTML DOM here and record the parse results that we want to send to the Pipeline.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse the HTML.
func (this *MyPageProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}
	query := p.GetHtmlParser()
	query.Find(`div[class="wx-rb bg-blue wx-rb_v1 _item"]`).Each(func(i int, s *goquery.Selection) {
		name := s.Find("div.txt-box > h3").Text()
		href, _ := s.Attr("href")
		fmt.Printf("WeName:%v link:http://weixin.sogou.com%v \r\n", name, href)
		// the entity we want to save via the Pipeline
		p.AddField("name", name)
		p.AddField("href", href)
	})
	next_page_href, _ := query.Find("#sogou_next").Attr("href")
	if next_page_href == "" {
		p.SetSkip(true)
	} else {
		p.AddTargetRequestWithHeaderFile("http://weixin.sogou.com/weixin"+next_page_href, "html", "weixin.sogou.com.json")
	}
}
Example 2: Process
func (this *MyProcessor) Process(p *page.Page) {
	if !p.IsSucc() {
		mlog.LogInst().LogError(p.Errormsg())
		return
	}
	u, err := url.Parse(p.GetRequest().GetUrl())
	if err != nil {
		mlog.LogInst().LogError(err.Error())
		return
	}
	if !strings.HasSuffix(u.Host, "jiexieyin.org") {
		return
	}
	var urls []string
	query := p.GetHtmlParser()
	query.Find("a").Each(func(i int, s *goquery.Selection) {
		href, _ := s.Attr("href")
		reJavascript := regexp.MustCompile(`^javascript:`)
		reLocal := regexp.MustCompile(`^#`)
		reMailto := regexp.MustCompile(`^mailto:`)
		if reJavascript.MatchString(href) || reLocal.MatchString(href) || reMailto.MatchString(href) {
			return
		}
		// resolve relative paths against the current page URL
		urlHref, err := url.Parse(href)
		if err != nil {
			mlog.LogInst().LogError(err.Error())
			return
		}
		if !urlHref.IsAbs() {
			absHref := u.ResolveReference(urlHref).String()
			urls = append(urls, absHref)
		} else {
			urls = append(urls, href)
		}
	})
	p.AddTargetRequests(urls, "html")
}
Example 3: Process
// Parse html dom here and record the parse result that we want to crawl.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (this *MyPageProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}
	html := p.GetBodyStr()
	newUrls := urlutil.GetAllUrlIn(p.GetRequest().GetUrl(), html)
	for _, newUrl := range newUrls {
		newUrl = strings.Replace(newUrl, "//weibo.com/", "//tw.weibo.com/", -1)
		p.AddTargetRequest(newUrl, "html")
	}
	mailAddrList := mailaddrutil.GetAllMailAddrIn(html)
	for _, mailAddr := range mailAddrList {
		if _, ok := this.mailAddrMap[mailAddr]; !ok {
			this.mailAddrMap[mailAddr] = true
			this.mailLogger.WriteString(mailAddr + "\n")
			this.MailHandle.Push(mailAddr)
		}
	}
}