本文整理匯總了Golang中github.com/aosen/robot.Page.GetHtmlParser方法的典型用法代碼示例。如果您正苦於以下問題:Golang Page.GetHtmlParser方法的具體用法?Golang Page.GetHtmlParser怎麽用?Golang Page.GetHtmlParser使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類github.com/aosen/robot.Page
的用法示例。
在下文中一共展示了Page.GetHtmlParser方法的10個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Golang代碼示例。
示例1: Process
// Parse html dom here and record the parse result that we want to Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
// Process parses a GitHub repository page: it collects repository links
// for further crawling and records the author/project names as result
// fields for the Pipeline.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (proc *MyPageProcesser) Process(p *robot.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}
	query := p.GetHtmlParser()

	// Collect repository links; these urls will be saved and crawled by
	// other coroutines.
	var urls []string
	query.Find("h3[class='repo-list-name'] a").Each(func(i int, s *goquery.Selection) {
		href, _ := s.Attr("href")
		urls = append(urls, "http://github.com/"+href)
	})
	p.AddTargetRequests(urls, "html")

	// FIX: strings.Trim(x, " \t\n") missed "\r" and unicode whitespace;
	// strings.TrimSpace handles all of it.
	name := strings.TrimSpace(query.Find(".entry-title .author").Text())
	repository := strings.TrimSpace(query.Find(".entry-title .js-current-repository").Text())
	//readme, _ := query.Find("#readme").Html()

	// Pages without an author are not repository pages; skip them.
	if name == "" {
		p.SetSkip(true)
	}
	// the entity we want to save by Pipeline
	p.AddField("author", name)
	p.AddField("project", repository)
	//p.AddField("readme", readme)
}
示例2: Process
// Parse html dom here and record the parse result that we want to Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
// Process parses a sogou weixin search-result page: it logs and records
// every matched account entry (name + href) and follows the "next page"
// link while one exists.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (proc *MyPageProcesser) Process(p *robot.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}
	query := p.GetHtmlParser()
	query.Find(`div[class="wx-rb bg-blue wx-rb_v1 _item"]`).Each(func(i int, s *goquery.Selection) {
		name := s.Find("div.txt-box > h3").Text()
		href, _ := s.Attr("href")
		// BUG FIX: the scheme was duplicated in the log line
		// ("link:http://http://weixin.sogou.com...").
		fmt.Printf("WeName:%v link:http://weixin.sogou.com%v \r\n", name, href)
		// the entity we want to save by Pipeline
		p.AddField("name", name)
		p.AddField("href", href)
	})
	// Follow pagination until the "next" link disappears.
	nextPageHref, _ := query.Find("#sogou_next").Attr("href")
	if nextPageHref == "" {
		p.SetSkip(true)
	} else {
		p.AddTargetRequestWithHeaderFile("http://weixin.sogou.com/weixin"+nextPageHref, "html", "weixin.sogou.com.json")
	}
}
示例3: contentParse
//小說內容解析
func (self *Www79xsComProcessor) contentParse(p *robot.Page) {
meta := p.GetRequest().GetMeta().(map[string]interface{})
//開始解析頁麵
query := p.GetHtmlParser()
html, _ := query.Find(".contentbox").Html()
meta["content"] = strings.Replace(strings.Replace(html, "<br/><br/>", "\n", -1), "<br/>", "\n", -1)
p.AddField("code", "0")
for k, v := range meta {
p.AddField(k, v.(string))
}
}
示例4: mainParse
//主頁解析
func (self *Www79xsComProcessor) mainParse(p *robot.Page) {
//開始解析頁麵
query := p.GetHtmlParser()
query.Find(".subnav ul li a").Each(func(i int, s *goquery.Selection) {
addr, _ := s.Attr("href")
if addr == utils.GirlUrl {
p.AddTargetRequest(utils.InitRequest(utils.BaseUrl+addr, map[string]string{"first": utils.GIRL}, self.urlListParse))
} else {
p.AddTargetRequest(utils.InitRequest(utils.BaseUrl+addr, map[string]string{"first": utils.BOY}, self.urlListParse))
}
})
}
示例5: introParse
//解析小說詳情頁
func (self *Www79xsComProcessor) introParse(p *robot.Page) {
meta := p.GetRequest().GetMeta().(map[string]string)
//開始解析頁麵
query := p.GetHtmlParser()
intro := query.Find("#info h3 p").Eq(1).Text()
img, _ := query.Find(".img img").Attr("src")
// 小說章節列表地址
chaptersource, _ := query.Find(".b1 a").Attr("href")
tmp := utils.MapCopy(meta)
tmp["introduction"] = intro
tmp["img"] = utils.BaseUrl + img
tmp["chaptersource"] = utils.BaseUrl + chaptersource
p.AddTargetRequest(utils.InitRequest(utils.BaseUrl+chaptersource, tmp, self.chaperParse))
}
示例6: Process
// Process restricts the crawl to jiexieyin.org, collects every usable
// <a href> on the page (resolving relative links against the page URL),
// queues them as new requests, and records the page URL as result fields.
func (self *MyProcessor) Process(p *robot.Page) {
	if !p.IsSucc() {
		mlog.LogInst().LogError(p.Errormsg())
		return
	}
	u, err := url.Parse(p.GetRequest().GetUrl())
	if err != nil {
		mlog.LogInst().LogError(err.Error())
		return
	}
	if !strings.HasSuffix(u.Host, "jiexieyin.org") {
		return
	}

	// FIX: these were compiled inside the per-anchor closure, i.e. three
	// regexp compilations for every <a> on the page. Compile once.
	reJavascript := regexp.MustCompile(`^javascript:`)
	reLocal := regexp.MustCompile(`^#`)
	reMailto := regexp.MustCompile(`^mailto:`)

	var urls []string
	query := p.GetHtmlParser()
	query.Find("a").Each(func(i int, s *goquery.Selection) {
		href, _ := s.Attr("href")
		// Skip javascript:, in-page anchor and mailto: links.
		if reJavascript.MatchString(href) || reLocal.MatchString(href) || reMailto.MatchString(href) {
			return
		}
		urlHref, err := url.Parse(href)
		if err != nil {
			mlog.LogInst().LogError(err.Error())
			return
		}
		// BUG FIX: relative links were joined by naive concatenation
		// (pageURL + href), which yields broken URLs for "/abs/path" and
		// sibling-relative links. Resolve against the page URL instead;
		// absolute hrefs pass through unchanged.
		urls = append(urls, u.ResolveReference(urlHref).String())
	})
	p.AddTargetRequests(initrequests(urls))
	p.AddField("test1", p.GetRequest().GetUrl())
	p.AddField("test2", p.GetRequest().GetUrl())
}
示例7: chaperParse
//小說章節解析
func (self *Www79xsComProcessor) chaperParse(p *robot.Page) {
meta := p.GetRequest().GetMeta().(map[string]string)
//開始解析頁麵
query := p.GetHtmlParser()
query.Find(".insert_list li").Each(func(i int, s *goquery.Selection) {
tmp := utils.MapCopy(meta)
tmp["chapter"] = strconv.Itoa(i)
tmp["subtitle"] = s.Find("strong a").Text()
addr, _ := s.Find("strong a").Attr("href")
tmp["contenturl"] = p.GetRequest().GetBaseUrl() + addr
//檢測contenturl, 如果數據庫中存在,則跳過本次抓取,如果不存在則將url加入調度隊列
//這個需求有時間再做
if len(tmp["subtitle"]) != 0 {
p.AddTargetRequest(utils.InitRequest(tmp["contenturl"], tmp, self.contentParse))
}
})
}
示例8: classParse
//分類列表解析
func (self *Www79xsComProcessor) classParse(p *robot.Page) {
meta := p.GetRequest().GetMeta().(map[string]string)
//開始解析頁麵
query := p.GetHtmlParser()
query.Find("div .yl_nr_lt2 ul").Each(func(i int, s *goquery.Selection) {
//獲取二級分類, 小說標題,作者
second := s.Find(".ynl2 a").Text()
title := s.Find(".ynl3 a").Eq(1).Text()
author := s.Find(".ynl6 a").Text()
novelsource := utils.BaseUrl + func() string {
addr, _ := s.Find(".ynl3 a").Eq(1).Attr("href")
return addr
}()
tmp := make(map[string]string)
tmp["first"] = meta["first"]
tmp["second"] = second
tmp["title"] = title
tmp["author"] = author
tmp["novelsource"] = novelsource
p.AddTargetRequest(utils.InitRequest(novelsource, tmp, self.introParse))
})
}
示例9: urlListParse
//獲取分類頁麵的url list,並解析
func (self *Www79xsComProcessor) urlListParse(p *robot.Page) {
meta := p.GetRequest().GetMeta()
//開始解析頁麵
query := p.GetHtmlParser()
//獲取尾頁addr
lastaddr, ok := query.Find("tbody a").Last().Attr("href")
if ok {
//解析addr
kv := goutils.GetKVInRelaPath(lastaddr)
//url拚接
maxpage, _ := strconv.Atoi(kv["page"])
for i := 1; i <= maxpage; i++ {
page := strconv.Itoa(i)
p.AddTargetRequest(utils.InitRequest(
"http://www.79xs.com/Book/ShowBookList.aspx?tclassid="+kv["tclassid"]+"&page="+page,
meta.(map[string]string),
self.classParse))
}
} else {
p.AddTargetRequest(utils.InitRequest(p.GetRequest().GetUrl(), meta.(map[string]string), self.classParse))
}
}
示例10: Process
// Process handles two page tags:
//   - "index": scan the front page's link list, match entertainment (rxYule)
//     and picture (rxPic) URLs, and queue a changyan comment-API request
//     for each hit;
//   - "changyan": decode the comment-count JSON and print the stored title
//     together with the comment statistics.
func (proc MyPageProcesser) Process(p *robot.Page) {
	query := p.GetHtmlParser()
	if p.GetUrlTag() == "index" {
		query.Find(`div[class="main area"] div[class="lc"] ul li a`).Each(func(i int, s *goquery.Selection) {
			url, isExsit := s.Attr("href")
			if !isExsit {
				return
			}
			// FIX: the original compiled a throwaway placeholder regexp
			// ("^do not know what is this") on every anchor just to have a
			// default that never matches. Pick the matching pattern directly
			// and skip unmatched links. rxPic is checked first because in
			// the original it was assigned last and therefore took
			// precedence when both patterns matched.
			var reg *regexp.Regexp
			var fmtStr string
			switch {
			case rxPic.MatchString(url):
				reg, fmtStr = rxPic, wkSohuPic
			case rxYule.MatchString(url):
				reg, fmtStr = rxYule, wkSohuYule
			default:
				return
			}
			if m := reg.FindStringSubmatch(url); len(m) == 2 {
				addRequest(p, "changyan", fmt.Sprintf(fmtStr, m[1]), "", s.Text())
			}
		})
	}
	if p.GetUrlTag() == "changyan" {
		jsonMap := ChangyanJson{}
		// Best effort: a malformed payload is silently ignored.
		if err := json.NewDecoder(strings.NewReader(p.GetBodyStr())).Decode(&jsonMap); err == nil {
			if content, ok := p.GetRequest().GetMeta().(string); ok {
				fmt.Println("Title:", content, " CommentCount:", jsonMap.ListData.OuterCmtSum, " ParticipationCount:", jsonMap.ListData.ParticipationSum)
			}
		}
	}
}