This article collects typical usage examples of the Golang Page class from github.com/aosen/robot: what the Page class is for and how it is used in practice. The 15 code examples below illustrate the class and are ordered by popularity.
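For orientation, here is a minimal, hypothetical page processor that strings together the Page calls recurring throughout the examples below (IsSucc, GetHtmlParser, AddField, SetSkip). It is only a sketch built from the API usage visible in these snippets, not code taken from the robot package itself.

// ExampleProcesser is a hypothetical processor illustrating the common
// robot.Page pattern seen in the examples below.
type ExampleProcesser struct{}

func (self *ExampleProcesser) Process(p *robot.Page) {
    // bail out if the download failed
    if !p.IsSucc() {
        return
    }
    // the downloader attaches a goquery document to the page
    query := p.GetHtmlParser()
    title := strings.TrimSpace(query.Find("title").Text())
    if title == "" {
        // nothing useful on this page; tell the pipeline to skip it
        p.SetSkip(true)
        return
    }
    // fields collected here are handed to the pipeline
    p.AddField("title", title)
}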
Example 1: connectByHttp

// connectByHttp performs a plain HTTP request (GET/POST according to the request) to download the page.
func connectByHttp(p *robot.Page, req *robot.Request) (*http.Response, error) {
    client := &http.Client{}
    httpreq, err := http.NewRequest(req.GetMethod(), req.GetUrl(), strings.NewReader(req.GetPostdata()))
    if err != nil {
        // building the request failed; report it the same way as a failed download
        mlog.LogInst().LogError(err.Error())
        p.SetStatus(true, err.Error())
        return nil, err
    }
    if header := req.GetHeader(); header != nil {
        httpreq.Header = header
    }
    if cookies := req.GetCookies(); cookies != nil {
        for i := range cookies {
            httpreq.AddCookie(cookies[i])
        }
    }
    var resp *http.Response
    if resp, err = client.Do(httpreq); err != nil {
        if e, ok := err.(*url.Error); ok && e.Err != nil && e.Err.Error() == "normal" {
            // an inner error of "normal" is treated as a non-error
        } else {
            mlog.LogInst().LogError(err.Error())
            p.SetStatus(true, err.Error())
            //fmt.Printf("client do error %v \r\n", err)
            return nil, err
        }
    }
    return resp, nil
}
Example 2: downloadText

func (self *HttpDownloader) downloadText(p *robot.Page, req *robot.Request) *robot.Page {
    p, destbody := self.downloadFile(p, req)
    if !p.IsSucc() {
        return p
    }
    p.SetBodyStr(destbody).SetStatus(false, "")
    return p
}
Example 3: downloadHtml

func (self *HttpDownloader) downloadHtml(p *robot.Page, req *robot.Request) *robot.Page {
    var err error
    p, destbody := self.downloadFile(p, req)
    //fmt.Printf("Destbody %v \r\n", destbody)
    if !p.IsSucc() {
        //fmt.Print("Page error \r\n")
        return p
    }
    bodyReader := bytes.NewReader([]byte(destbody))
    var doc *goquery.Document
    if doc, err = goquery.NewDocumentFromReader(bodyReader); err != nil {
        mlog.LogInst().LogError(err.Error())
        p.SetStatus(true, err.Error())
        return p
    }
    var body string
    if body, err = doc.Html(); err != nil {
        mlog.LogInst().LogError(err.Error())
        p.SetStatus(true, err.Error())
        return p
    }
    p.SetBodyStr(body).SetHtmlParser(doc).SetStatus(false, "")
    return p
}
Example 4: contentParse

// contentParse parses the body text of a novel chapter.
func (self *Www79xsComProcessor) contentParse(p *robot.Page) {
    meta := p.GetRequest().GetMeta().(map[string]interface{})
    // start parsing the page
    query := p.GetHtmlParser()
    html, _ := query.Find(".contentbox").Html()
    meta["content"] = strings.Replace(strings.Replace(html, "<br/><br/>", "\n", -1), "<br/>", "\n", -1)
    p.AddField("code", "0")
    for k, v := range meta {
        p.AddField(k, v.(string))
    }
}
Example 5: downloadJson

func (self *HttpDownloader) downloadJson(p *robot.Page, req *robot.Request) *robot.Page {
    var err error
    p, destbody := self.downloadFile(p, req)
    if !p.IsSucc() {
        return p
    }
    var body []byte
    body = []byte(destbody)
    mtype := req.GetResponceType()
    if mtype == "jsonp" {
        tmpstr := goutils.JsonpToJson(destbody)
        body = []byte(tmpstr)
    }
    var r *simplejson.Json
    if r, err = simplejson.NewJson(body); err != nil {
        mlog.LogInst().LogError(string(body) + "\t" + err.Error())
        p.SetStatus(true, err.Error())
        return p
    }
    // json result
    p.SetBodyStr(string(body)).SetJson(r).SetStatus(false, "")
    return p
}
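goutils.JsonpToJson above is used to unwrap JSONP responses before parsing. Its implementation is not shown in these examples; the standalone sketch below only illustrates the kind of transformation it presumably performs (stripping the callback wrapper), and is not the actual goutils code.

package main

import (
    "fmt"
    "strings"
)

// jsonpToJSON is a hypothetical illustration: it strips a JSONP callback
// wrapper such as `cb({"a":1});` and returns the inner JSON `{"a":1}`.
func jsonpToJSON(jsonp string) string {
    start := strings.Index(jsonp, "(")
    end := strings.LastIndex(jsonp, ")")
    if start < 0 || end <= start {
        // not wrapped; return unchanged
        return jsonp
    }
    return jsonp[start+1 : end]
}

func main() {
    fmt.Println(jsonpToJSON(`callback({"sum":12});`))
    // Output: {"sum":12}
}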
Example 6: Process

func (self *Www79xsComProcessor) Process(p *robot.Page) {
    // check whether the page was fetched successfully
    if !p.IsSucc() {
        log.Println(p.Errormsg())
        return
    }
    meta := p.GetRequest().GetMeta()
    handler, ok := meta.(map[string]interface{})["handler"]
    // if meta carries no "handler", this is the entry page; otherwise dispatch to the named callback
    if ok {
        switch handler {
        case "mainParse":
            self.mainParse(p)
        case "urlListParse":
            self.urlListParse(p)
        case "classParse":
            self.classParse(p)
        case "introParse":
            self.introParse(p)
        case "chaperParse":
            self.chaperParse(p)
        case "contentParse":
            self.contentParse(p)
        default:
            return
        }
    }
}
Example 7: urlListParse

// urlListParse fetches the url list of a category page and parses it.
func (self *Www79xsComProcessor) urlListParse(p *robot.Page) {
    meta := p.GetRequest().GetMeta()
    // start parsing the page
    query := p.GetHtmlParser()
    // get the address of the last page
    lastaddr, ok := query.Find("tbody a").Last().Attr("href")
    if ok {
        // parse the address
        kv := goutils.GetKVInRelaPath(lastaddr)
        // build one url per page
        maxpage, _ := strconv.Atoi(kv["page"])
        for i := 1; i <= maxpage; i++ {
            page := strconv.Itoa(i)
            p.AddTargetRequest(utils.InitRequest(
                "http://www.79xs.com/Book/ShowBookList.aspx?tclassid="+kv["tclassid"]+"&page="+page,
                meta.(map[string]string),
                self.classParse))
        }
    } else {
        p.AddTargetRequest(utils.InitRequest(p.GetRequest().GetUrl(), meta.(map[string]string), self.classParse))
    }
}
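goutils.GetKVInRelaPath is used above to pull tclassid and page out of the last-page href. Its implementation is not part of these examples; the assumption is that it returns the query parameters of a relative path, which the standard library can also do, as sketched below (the sample href is made up for illustration).

package main

import (
    "fmt"
    "net/url"
)

func main() {
    // Hypothetical last-page href as it might appear on a category page.
    lastaddr := "/Book/ShowBookList.aspx?tclassid=2&page=17"

    u, err := url.Parse(lastaddr)
    if err != nil {
        panic(err)
    }
    q := u.Query()
    // Equivalent of the kv map used in Example 7.
    fmt.Println(q.Get("tclassid"), q.Get("page")) // 2 17
}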
Example 8: Process

func (this MyPageProcesser) Process(p *robot.Page) {
    query := p.GetHtmlParser()
    if p.GetUrlTag() == "index" {
        query.Find(`div[class="main area"] div[class="lc"] ul li a`).Each(func(i int, s *goquery.Selection) {
            url, isExsit := s.Attr("href")
            if isExsit {
                reg := regexp.MustCompile(`^do not know what is this`)
                var fmtStr string
                if rxYule.MatchString(url) {
                    reg = rxYule
                    fmtStr = wkSohuYule
                }
                if rxPic.MatchString(url) {
                    reg = rxPic
                    fmtStr = wkSohuPic
                }
                regxpArrag := reg.FindStringSubmatch(url)
                if len(regxpArrag) == 2 {
                    addRequest(p, "changyan", fmt.Sprintf(fmtStr, regxpArrag[1]), "", s.Text())
                }
            }
        })
    }
    if p.GetUrlTag() == "changyan" {
        jsonMap := ChangyanJson{}
        err := json.NewDecoder(strings.NewReader(p.GetBodyStr())).Decode(&jsonMap)
        if err == nil {
            content, ok := p.GetRequest().GetMeta().(string)
            if ok {
                fmt.Println("Title:", content, " CommentCount:", jsonMap.ListData.OuterCmtSum, " ParticipationCount:", jsonMap.ListData.ParticipationSum)
            }
        }
    }
}
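The ChangyanJson type decoded above is not defined in these examples. Below is a plausible sketch, inferred only from the fields the snippet accesses; the json tags and the sample payload are assumptions, not the real Changyan comment API.

package main

import (
    "encoding/json"
    "fmt"
)

// ChangyanJson is a hypothetical shape reconstructed from the field
// accesses in Example 8; the json tags are assumptions.
type ChangyanJson struct {
    ListData struct {
        OuterCmtSum      int `json:"outer_cmt_sum"`
        ParticipationSum int `json:"participation_sum"`
    } `json:"listData"`
}

func main() {
    body := `{"listData":{"outer_cmt_sum":12,"participation_sum":30}}`
    var jm ChangyanJson
    if err := json.Unmarshal([]byte(body), &jm); err != nil {
        panic(err)
    }
    fmt.Println(jm.ListData.OuterCmtSum, jm.ListData.ParticipationSum) // 12 30
}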
Example 9: chaperParse

// chaperParse parses a novel's chapter list.
func (self *Www79xsComProcessor) chaperParse(p *robot.Page) {
    meta := p.GetRequest().GetMeta().(map[string]string)
    // start parsing the page
    query := p.GetHtmlParser()
    query.Find(".insert_list li").Each(func(i int, s *goquery.Selection) {
        tmp := utils.MapCopy(meta)
        tmp["chapter"] = strconv.Itoa(i)
        tmp["subtitle"] = s.Find("strong a").Text()
        addr, _ := s.Find("strong a").Attr("href")
        tmp["contenturl"] = p.GetRequest().GetBaseUrl() + addr
        // TODO: check contenturl against the database; skip this fetch if it already
        // exists, otherwise add the url to the scheduling queue (left for later)
        if len(tmp["subtitle"]) != 0 {
            p.AddTargetRequest(utils.InitRequest(tmp["contenturl"], tmp, self.contentParse))
        }
    })
}
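The TODO above plans a dedup check on contenturl before enqueueing. A minimal in-memory sketch of such a check follows; it uses a plain map guarded by a mutex and is only an illustration (the sample url is made up), not the project's database-backed version.

package main

import (
    "fmt"
    "sync"
)

// visitedSet is a hypothetical in-memory dedup store for content urls.
type visitedSet struct {
    mu   sync.Mutex
    seen map[string]bool
}

func newVisitedSet() *visitedSet {
    return &visitedSet{seen: make(map[string]bool)}
}

// ShouldFetch reports whether the url has not been seen yet and marks it as seen.
func (v *visitedSet) ShouldFetch(url string) bool {
    v.mu.Lock()
    defer v.mu.Unlock()
    if v.seen[url] {
        return false
    }
    v.seen[url] = true
    return true
}

func main() {
    v := newVisitedSet()
    fmt.Println(v.ShouldFetch("http://www.79xs.com/some/chapter.html")) // true
    fmt.Println(v.ShouldFetch("http://www.79xs.com/some/chapter.html")) // false
}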
Example 10: mainParse

// mainParse parses the site's main page.
func (self *Www79xsComProcessor) mainParse(p *robot.Page) {
    // start parsing the page
    query := p.GetHtmlParser()
    query.Find(".subnav ul li a").Each(func(i int, s *goquery.Selection) {
        addr, _ := s.Attr("href")
        if addr == utils.GirlUrl {
            p.AddTargetRequest(utils.InitRequest(utils.BaseUrl+addr, map[string]string{"first": utils.GIRL}, self.urlListParse))
        } else {
            p.AddTargetRequest(utils.InitRequest(utils.BaseUrl+addr, map[string]string{"first": utils.BOY}, self.urlListParse))
        }
    })
}
Example 11: introParse

// introParse parses a novel's detail (introduction) page.
func (self *Www79xsComProcessor) introParse(p *robot.Page) {
    meta := p.GetRequest().GetMeta().(map[string]string)
    // start parsing the page
    query := p.GetHtmlParser()
    intro := query.Find("#info h3 p").Eq(1).Text()
    img, _ := query.Find(".img img").Attr("src")
    // address of the novel's chapter list
    chaptersource, _ := query.Find(".b1 a").Attr("href")
    tmp := utils.MapCopy(meta)
    tmp["introduction"] = intro
    tmp["img"] = utils.BaseUrl + img
    tmp["chaptersource"] = utils.BaseUrl + chaptersource
    p.AddTargetRequest(utils.InitRequest(utils.BaseUrl+chaptersource, tmp, self.chaperParse))
}
Example 12: Process

func (self *Www79xsComProcessor) Process(p *robot.Page) {
    // check whether the page was fetched successfully
    if !p.IsSucc() {
        mlog.LogInst().LogError(p.Errormsg())
        return
    }
    // if the callback is nil this is the entry page; otherwise run the request's callback directly
    callback := p.GetRequest().GetCallBack()
    if callback == nil {
        self.mainParse(p)
    } else {
        callback(p)
    }
}
Example 13: classParse

// classParse parses a category listing page.
func (self *Www79xsComProcessor) classParse(p *robot.Page) {
    meta := p.GetRequest().GetMeta().(map[string]string)
    // start parsing the page
    query := p.GetHtmlParser()
    query.Find("div .yl_nr_lt2 ul").Each(func(i int, s *goquery.Selection) {
        // grab the second-level category, the novel title and the author
        second := s.Find(".ynl2 a").Text()
        title := s.Find(".ynl3 a").Eq(1).Text()
        author := s.Find(".ynl6 a").Text()
        novelsource := utils.BaseUrl + func() string {
            addr, _ := s.Find(".ynl3 a").Eq(1).Attr("href")
            return addr
        }()
        tmp := make(map[string]string)
        tmp["first"] = meta["first"]
        tmp["second"] = second
        tmp["title"] = title
        tmp["author"] = author
        tmp["novelsource"] = novelsource
        p.AddTargetRequest(utils.InitRequest(novelsource, tmp, self.introParse))
    })
}
Example 14: downloadFile

// downloadFile fetches the page body and converts its charset to UTF-8.
func (self *HttpDownloader) downloadFile(p *robot.Page, req *robot.Request) (*robot.Page, string) {
    var err error
    var urlstr string
    if urlstr = req.GetUrl(); len(urlstr) == 0 {
        mlog.LogInst().LogError("url is empty")
        p.SetStatus(true, "url is empty")
        return p, ""
    }
    var resp *http.Response
    if proxystr := req.GetProxyHost(); len(proxystr) != 0 {
        // download through an http proxy
        //fmt.Print("HttpProxy Enter ",proxystr,"\n")
        resp, err = connectByHttpProxy(p, req)
    } else {
        // normal http download
        //fmt.Print("Http Normal Enter \n",proxystr,"\n")
        resp, err = connectByHttp(p, req)
    }
    if err != nil {
        return p, ""
    }
    p.SetHeader(resp.Header)
    p.SetCookies(resp.Cookies())
    // convert the body to utf-8
    var bodyStr string
    if resp.Header.Get("Content-Encoding") == "gzip" {
        bodyStr = self.changeCharsetEncodingAutoGzipSupport(resp.Header.Get("Content-Type"), resp.Body)
    } else {
        bodyStr = self.changeCharsetEncodingAuto(resp.Header.Get("Content-Type"), resp.Body)
    }
    //fmt.Printf("utf-8 body %v \r\n", bodyStr)
    defer resp.Body.Close()
    return p, bodyStr
}
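downloadFile calls connectByHttpProxy when the request carries a proxy host, but that helper is not among these examples. The following is only a plausible sketch, assuming it mirrors connectByHttp from Example 1 and routes the client through req.GetProxyHost(); the error handling and logging style are copied from the snippets above, not from the real source.

// connectByHttpProxy is a hypothetical counterpart of connectByHttp that
// sends the request through the proxy named by req.GetProxyHost().
func connectByHttpProxy(p *robot.Page, req *robot.Request) (*http.Response, error) {
    // assume GetProxyHost returns "host:port" without a scheme
    proxyUrl, err := url.Parse("http://" + req.GetProxyHost())
    if err != nil {
        mlog.LogInst().LogError(err.Error())
        p.SetStatus(true, err.Error())
        return nil, err
    }
    client := &http.Client{
        Transport: &http.Transport{Proxy: http.ProxyURL(proxyUrl)},
    }
    httpreq, err := http.NewRequest(req.GetMethod(), req.GetUrl(), strings.NewReader(req.GetPostdata()))
    if err != nil {
        mlog.LogInst().LogError(err.Error())
        p.SetStatus(true, err.Error())
        return nil, err
    }
    if header := req.GetHeader(); header != nil {
        httpreq.Header = header
    }
    resp, err := client.Do(httpreq)
    if err != nil {
        mlog.LogInst().LogError(err.Error())
        p.SetStatus(true, err.Error())
        return nil, err
    }
    return resp, nil
}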
Example 15: Process

// Process parses the html dom and records the parse results we want into the Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse the html.
func (this *MyPageProcesser) Process(p *robot.Page) {
    if !p.IsSucc() {
        println(p.Errormsg())
        return
    }
    query := p.GetHtmlParser()
    var urls []string
    query.Find("h3[class='repo-list-name'] a").Each(func(i int, s *goquery.Selection) {
        href, _ := s.Attr("href")
        urls = append(urls, "http://github.com/"+href)
    })
    // these urls will be saved and crawled by other coroutines
    p.AddTargetRequests(urls, "html")
    name := query.Find(".entry-title .author").Text()
    name = strings.Trim(name, " \t\n")
    repository := query.Find(".entry-title .js-current-repository").Text()
    repository = strings.Trim(repository, " \t\n")
    //readme, _ := query.Find("#readme").Html()
    if name == "" {
        p.SetSkip(true)
    }
    // the entity we want to save through the Pipeline
    p.AddField("author", name)
    p.AddField("project", repository)
    //p.AddField("readme", readme)
}