本文整理汇总了Golang中github.com/jixiuf/go_spider/core/common/page.Page类的典型用法代码示例。如果您正苦于以下问题:Golang Page类的具体用法?Golang Page怎么用?Golang Page使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Page类的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: connectByHttp
// choose http GET/method to download
// connectByHttp performs the HTTP request described by req (method, URL,
// headers, cookies, optional POST body) using a client configured with the
// request's redirect policy, and returns the raw response.
//
// A redirect-func error whose message is exactly "normal" is treated as a
// deliberate, non-fatal stop signal and swallowed; any other failure is
// logged, recorded on the page status, and returned.
func connectByHttp(p *page.Page, req *request.Request) (*http.Response, error) {
	client := &http.Client{
		CheckRedirect: req.GetRedirectFunc(),
	}
	httpreq, err := http.NewRequest(req.GetMethod(), req.GetUrl(), strings.NewReader(req.GetPostdata()))
	if err != nil {
		// The original passed an unchecked (possibly nil) request to
		// client.Do, which panics on a malformed method or URL.
		mlog.LogInst().LogError(err.Error())
		p.SetStatus(true, err.Error())
		return nil, err
	}
	if header := req.GetHeader(); header != nil {
		httpreq.Header = header
	}
	if cookies := req.GetCookies(); cookies != nil {
		for i := range cookies {
			httpreq.AddCookie(cookies[i])
		}
	}
	var resp *http.Response
	if resp, err = client.Do(httpreq); err != nil {
		if e, ok := err.(*url.Error); ok && e.Err != nil && e.Err.Error() == "normal" {
			// Redirect func signalled a deliberate stop; not an error.
		} else {
			mlog.LogInst().LogError(err.Error())
			p.SetStatus(true, err.Error())
			return nil, err
		}
	}
	return resp, nil
}
示例2: downloadText
// downloadText fetches req and stores the decoded body on the page as plain
// text, marking the page successful on completion.
func (this *HttpDownloader) downloadText(p *page.Page, req *request.Request) *page.Page {
	result, text := this.downloadFile(p, req)
	if result.IsSucc() {
		result.SetBodyStr(text).SetStatus(false, "")
	}
	return result
}
示例3: downloadHtml
// downloadHtml fetches req, parses the decoded body as HTML with goquery,
// and attaches both the rendered body string and the parsed document to the
// page. Any parse or render failure is logged and recorded on the page status.
func (this *HttpDownloader) downloadHtml(p *page.Page, req *request.Request) *page.Page {
	result, text := this.downloadFile(p, req)
	if !result.IsSucc() {
		return result
	}
	doc, err := goquery.NewDocumentFromReader(bytes.NewReader([]byte(text)))
	if err != nil {
		mlog.LogInst().LogError(err.Error())
		result.SetStatus(true, err.Error())
		return result
	}
	html, err := doc.Html()
	if err != nil {
		mlog.LogInst().LogError(err.Error())
		result.SetStatus(true, err.Error())
		return result
	}
	return result.SetBodyStr(html).SetHtmlParser(doc).SetStatus(false, "")
}
示例4: Process
// Parse html dom here and record the parse result that we want to crawl.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
// Process parses the downloaded HTML and records the lemma title and card
// summary as fields for the pipeline. Failed pages only print their error.
func (this *MyPageProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}
	const cutset = " \t\n"
	doc := p.GetHtmlParser()
	p.AddField("name", strings.Trim(doc.Find(".lemmaTitleH1").Text(), cutset))
	p.AddField("summary", strings.Trim(doc.Find(".card-summary-content .para").Text(), cutset))
}
示例5: TestDownloadHtml
// TestDownloadHtml downloads a live Sina page and verifies that the parsed
// document contains a <body> element. Requires network access.
func TestDownloadHtml(t *testing.T) {
	req := request.NewRequest("http://live.sina.com.cn/zt/l/v/finance/globalnews1/", "html", "", "GET", "", nil, nil, nil, nil)
	var dl downloader.Downloader = downloader.NewHttpDownloader()
	p := dl.Download(req)
	doc := p.GetHtmlParser()
	if doc.Find("body").Length() < 1 {
		t.Error("html parse failed!")
	}
}
示例6: TestDownloadJson
// TestDownloadJson downloads a live Sina JSON feed and prints the decoded
// result. Requires network access; makes no assertions.
func TestDownloadJson(t *testing.T) {
	req := request.NewRequest("http://live.sina.com.cn/zt/api/l/get/finance/globalnews1/index.htm?format=json&id=23521&pagesize=4&dire=f&dpc=1", "json", "", "GET", "", nil, nil, nil, nil)
	var dl downloader.Downloader = downloader.NewHttpDownloader()
	p := dl.Download(req)
	fmt.Printf("%v", p.GetJson())
}
示例7: TestCharSetChange
// TestCharSetChange downloads a page served in a non-UTF-8 charset and prints
// the body after the downloader's automatic charset conversion. Requires
// network access; makes no assertions.
func TestCharSetChange(t *testing.T) {
	req := request.NewRequest("http://soft.chinabyte.com/416/13164916.shtml", "html", "", "GET", "", nil, nil, nil, nil)
	var dl downloader.Downloader = downloader.NewHttpDownloader()
	p := dl.Download(req)
	// The original called GetBodyStr twice and discarded the first result;
	// one call is sufficient.
	fmt.Println(p.GetBodyStr())
}
示例8: downloadJson
// downloadJson fetches req, decodes the body as JSON (unwrapping a JSONP
// envelope first when the request's response type is "jsonp"), and attaches
// both the raw body string and the parsed document to the page.
func (this *HttpDownloader) downloadJson(p *page.Page, req *request.Request) *page.Page {
	result, text := this.downloadFile(p, req)
	if !result.IsSucc() {
		return result
	}
	if req.GetResponceType() == "jsonp" {
		text = util.JsonpToJson(text)
	}
	raw := []byte(text)
	parsed, err := simplejson.NewJson(raw)
	if err != nil {
		mlog.LogInst().LogError(string(raw) + "\t" + err.Error())
		result.SetStatus(true, err.Error())
		return result
	}
	return result.SetBodyStr(string(raw)).SetJson(parsed).SetStatus(false, "")
}
示例9: Process
// Parse html dom here and record the parse result that we want to crawl.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
// Process harvests every URL and e-mail address from the downloaded page:
// links are re-queued as crawl targets (rewriting //weibo.com/ hosts to
// //tw.weibo.com/), and previously unseen addresses are recorded in the
// dedup map, appended to the mail log, and pushed to the mail handler.
func (this *MyPageProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}
	body := p.GetBodyStr()
	for _, link := range urlutil.GetAllUrlIn(p.GetRequest().GetUrl(), body) {
		p.AddTargetRequest(strings.Replace(link, "//weibo.com/", "//tw.weibo.com/", -1), "html")
	}
	for _, addr := range mailaddrutil.GetAllMailAddrIn(body) {
		if _, seen := this.mailAddrMap[addr]; seen {
			continue
		}
		this.mailAddrMap[addr] = true
		this.mailLogger.WriteString(addr + "\n")
		this.MailHandle.Push(addr)
	}
}
示例10: downloadFile
// Download file and change the charset of page charset.
func (this *HttpDownloader) downloadFile(p *page.Page, req *request.Request) (*page.Page, string) {
var err error
var urlstr string
if urlstr = req.GetUrl(); len(urlstr) == 0 {
mlog.LogInst().LogError("url is empty")
p.SetStatus(true, "url is empty")
return p, ""
}
var resp *http.Response
if proxystr := req.GetProxyHost(); len(proxystr) != 0 {
//using http proxy
//fmt.Print("HttpProxy Enter ",proxystr,"\n")
resp, err = connectByHttpProxy(p, req)
} else {
//normal http download
//fmt.Print("Http Normal Enter \n",proxystr,"\n")
resp, err = connectByHttp(p, req)
}
if err != nil {
return p, ""
}
//b, _ := ioutil.ReadAll(resp.Body)
//fmt.Printf("Resp body %v \r\n", string(b))
p.SetHeader(resp.Header)
p.SetCookies(resp.Cookies())
// get converter to utf-8
var bodyStr string
if resp.Header.Get("Content-Encoding") == "gzip" {
bodyStr = this.changeCharsetEncodingAutoGzipSupport(resp.Header.Get("Content-Type"), resp.Body)
} else {
bodyStr = this.changeCharsetEncodingAuto(resp.Header.Get("Content-Type"), resp.Body)
}
//fmt.Printf("utf-8 body %v \r\n", bodyStr)
defer resp.Body.Close()
return p, bodyStr
}
示例11: addRequest
// addRequest queues url on p as a new JSON crawl target tagged with tag,
// carrying content through as the request payload. (cookie is accepted for
// signature compatibility but, as in the original, is not used.)
func addRequest(p *page.Page, tag, url, cookie, content string) {
	p.AddTargetRequestWithParams(request.NewRequest(url, "json", tag, "GET", "", nil, nil, nil, content))
}
示例12: Process
// Process collects every crawlable link from a jiexieyin.org page and queues
// the links as new HTML targets. javascript:, #fragment, and mailto: hrefs
// are skipped; relative hrefs are resolved against the page URL.
func (this *MyProcessor) Process(p *page.Page) {
	if !p.IsSucc() {
		mlog.LogInst().LogError(p.Errormsg())
		return
	}
	base, err := url.Parse(p.GetRequest().GetUrl())
	if err != nil {
		mlog.LogInst().LogError(err.Error())
		return
	}
	// Stay inside the target domain.
	if !strings.HasSuffix(base.Host, "jiexieyin.org") {
		return
	}
	// Compile once per page; the original rebuilt three regexps inside the
	// Each callback for every single anchor.
	skip := regexp.MustCompile(`^(javascript:|#|mailto:)`)
	var urls []string
	p.GetHtmlParser().Find("a").Each(func(i int, s *goquery.Selection) {
		href, _ := s.Attr("href")
		if skip.MatchString(href) {
			return
		}
		ref, err := url.Parse(href)
		if err != nil {
			mlog.LogInst().LogError(err.Error())
			return
		}
		// Resolve relative links against the page URL; the original's plain
		// string concatenation mishandled hrefs like "/x" and "../x".
		urls = append(urls, base.ResolveReference(ref).String())
	})
	p.AddTargetRequests(urls, "html")
}