This article collects typical usage examples of the Golang method github.com/hu17889/go_spider/core/common/page.Page.IsSucc. If you have been wondering what exactly Page.IsSucc does and how to use it, the hand-picked code examples below may help. You can also explore further usage examples of the class this method belongs to, github.com/hu17889/go_spider/core/common/page.Page.
Twelve code examples of the Page.IsSucc method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
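Before the examples, here is a minimal sketch of the pattern they all share: a PageProcesser checks IsSucc first and bails out early when the download failed. The processor type below is hypothetical and exists only to show the guard; IsSucc and Errormsg are the methods used throughout the examples.

package demo

import (
    "fmt"

    "github.com/hu17889/go_spider/core/common/page"
)

// DemoProcesser is a hypothetical PageProcesser used only to illustrate
// the IsSucc guard; real processors appear in the examples below.
type DemoProcesser struct{}

func (this *DemoProcesser) Process(p *page.Page) {
    if !p.IsSucc() {
        // The download (or an earlier parse step) failed;
        // Errormsg returns the message recorded via SetStatus.
        fmt.Println("crawl failed:", p.Errormsg())
        return
    }
    // ... parse the page and add fields / target requests here ...
}

// Finish is assumed to be the other method of go_spider's PageProcesser
// interface, called once before the spider stops.
func (this *DemoProcesser) Finish() {}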
Example 1: downloadHtml
func (this *HttpDownloader) downloadHtml(p *page.Page, req *request.Request) *page.Page {
    var err error
    p, destbody := this.downloadFile(p, req)
    //fmt.Printf("Destbody %v \r\n", destbody)
    if !p.IsSucc() {
        //fmt.Print("Page error \r\n")
        return p
    }
    bodyReader := bytes.NewReader([]byte(destbody))
    var doc *goquery.Document
    if doc, err = goquery.NewDocumentFromReader(bodyReader); err != nil {
        mlog.LogInst().LogError(err.Error())
        p.SetStatus(true, err.Error())
        return p
    }
    var body string
    if body, err = doc.Html(); err != nil {
        mlog.LogInst().LogError(err.Error())
        p.SetStatus(true, err.Error())
        return p
    }
    p.SetBodyStr(body).SetHtmlParser(doc).SetStatus(false, "")
    return p
}
Example 2: downloadJson
func (this *HttpDownloader) downloadJson(p *page.Page, req *request.Request) *page.Page {
    var err error
    p, destbody := this.downloadFile(p, req)
    if !p.IsSucc() {
        return p
    }
    body := []byte(destbody)
    mtype := req.GetResponceType()
    if mtype == "jsonp" {
        tmpstr := util.JsonpToJson(destbody)
        body = []byte(tmpstr)
    }
    var r *simplejson.Json
    if r, err = simplejson.NewJson(body); err != nil {
        mlog.LogInst().LogError(string(body) + "\t" + err.Error())
        p.SetStatus(true, err.Error())
        return p
    }
    // json result
    p.SetBodyStr(string(body)).SetJson(r).SetStatus(false, "")
    return p
}
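In the jsonp branch above, util.JsonpToJson unwraps a JSONP callback before decoding. The following is an illustrative sketch of that conversion only, not the library's actual implementation:

package demo

import "strings"

// jsonpToJson strips a JSONP callback wrapper, e.g. `cb({"a":1});` -> `{"a":1}`.
// Illustrative sketch only; go_spider's util.JsonpToJson may differ in detail.
func jsonpToJson(jsonp string) string {
    start := strings.Index(jsonp, "(")
    end := strings.LastIndex(jsonp, ")")
    if start < 0 || end <= start {
        return jsonp // not wrapped in a callback; return unchanged
    }
    return jsonp[start+1 : end]
}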
Example 3: Process
// Parse the html dom here and record the parse results we want in the Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (this *MyPageProcesser) Process(p *page.Page) {
    if !p.IsSucc() {
        println(p.Errormsg())
        return
    }
    query := p.GetHtmlParser()
    var urls []string
    query.Find("h3[class='repo-list-name'] a").Each(func(i int, s *goquery.Selection) {
        href, _ := s.Attr("href")
        urls = append(urls, "http://github.com/"+href)
    })
    // these urls will be saved and crawled by other coroutines.
    p.AddTargetRequests(urls, "html")
    name := query.Find(".entry-title .author").Text()
    name = strings.Trim(name, " \t\n")
    repository := query.Find(".entry-title .js-current-repository").Text()
    repository = strings.Trim(repository, " \t\n")
    //readme, _ := query.Find("#readme").Html()
    if name == "" {
        p.SetSkip(true)
    }
    // the entity we want to save by Pipeline
    p.AddField("author", name)
    p.AddField("project", repository)
    //p.AddField("readme", readme)
}
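A processor like this is typically wired into a spider as sketched below, following the library's README-style setup. The constructor, task name, and start URL are assumptions; the sketch relies on the MyPageProcesser shown above.

package main

import (
    "github.com/hu17889/go_spider/core/pipeline"
    "github.com/hu17889/go_spider/core/spider"
)

// NewMyPageProcesser is an assumed trivial constructor for the processor
// above; the original example does not show one.
func NewMyPageProcesser() *MyPageProcesser {
    return &MyPageProcesser{}
}

func main() {
    spider.NewSpider(NewMyPageProcesser(), "GithubRepoTask").    // task name is a placeholder
        AddUrl("https://github.com/hu17889/go_spider", "html").  // start URL is a placeholder; "html" selects the HTML downloader
        AddPipeline(pipeline.NewPipelineConsole()).              // print collected fields to the console
        SetThreadnum(3).                                         // three concurrent crawl goroutines
        Run()
}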
Example 4: Process
// Parse the json result here and record the parse results we want to crawl.
// Package simplejson (https://github.com/bitly/go-simplejson) is used to parse json data.
func (this *MyPageProcesser) Process(p *page.Page) {
    if !p.IsSucc() {
        println(p.Errormsg())
        return
    }
    query := p.GetJson()
    status, err := query.GetPath("result", "status", "code").Int()
    if err != nil || status != 0 {
        // use %v so a nil err does not panic before the log message is built
        log.Panicf("page is crawled error : errorinfo=%v : status=%d : startNewsId=%d", err, status, this.startNewsId)
    }
    num, err := query.GetPath("result", "pageStr", "pageSize").Int()
    if num == 0 || err != nil {
        // Add url of next crawl
        startIdstr := strconv.Itoa(this.startNewsId)
        p.AddTargetRequest("http://live.sina.com.cn/zt/api/l/get/finance/globalnews1/index.htm?format=json&id="+startIdstr+"&pagesize=10&dire=f", "json")
        return
    }
    var idint, nextid int
    var nextidstr string
    query = query.Get("result").Get("data")
    for i := 0; i < num; i++ {
        id, err := query.GetIndex(i).Get("id").String()
        if id == "" || err != nil {
            continue
        }
        idint, err = strconv.Atoi(id)
        if err != nil {
            continue
        }
        if idint <= this.startNewsId {
            break
        }
        if i == 0 {
            nextid = idint
            nextidstr = id
        }
        content, err := query.GetIndex(i).Get("content").String()
        if content == "" || err != nil {
            continue
        }
        time, err := query.GetIndex(i).Get("created_at").String()
        if err != nil {
            continue
        }
        p.AddField(id+"_id", id)
        p.AddField(id+"_content", content)
        p.AddField(id+"_time", time)
    }
    // Add url of next crawl
    this.startNewsId = nextid
    p.AddTargetRequest("http://live.sina.com.cn/zt/api/l/get/finance/globalnews1/index.htm?format=json&id="+nextidstr+"&pagesize=10&dire=f", "json")
    //println(p.GetTargetRequests())
}
Example 5: downloadText
func (this *HttpDownloader) downloadText(p *page.Page, req *request.Request) *page.Page {
    p, destbody := this.downloadFile(p, req)
    if !p.IsSucc() {
        return p
    }
    p.SetBodyStr(destbody).SetStatus(false, "")
    return p
}
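The three helpers in Examples 1, 2, and 5 suggest a dispatcher keyed on the request's response type. The fragment below is a plausible sketch of such a dispatch, an assumption for illustration rather than the actual HttpDownloader.Download source:

// Plausible dispatch sketch (an assumption based on Examples 1, 2 and 5;
// not the actual HttpDownloader.Download implementation).
func (this *HttpDownloader) Download(req *request.Request) *page.Page {
    p := page.NewPage(req)
    switch req.GetResponceType() {
    case "html":
        return this.downloadHtml(p, req)
    case "json", "jsonp":
        return this.downloadJson(p, req)
    default:
        return this.downloadText(p, req)
    }
}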
Example 6: Process
func (this *PlantProcesser) Process(p *page.Page) {
    if !p.IsSucc() {
        println(p.Errormsg())
        return
    }
    query := p.GetHtmlParser()
    if !this.isPlant(query, p) {
        p.SetSkip(true)
    }
    this.getName(query, p)
    this.getSummary(query, p)
    this.getCatalog(query, p)
    p.AddTargetRequests(this.getUrls(query), "html")
}
Example 7: Process
// Parse the html dom here and record the parse results we want in the Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (this *MyPageProcesser) Process(p *page.Page) {
    if !p.IsSucc() {
        println(p.Errormsg())
        return
    }
    query := p.GetHtmlParser()
    currentUrl := p.GetRequest().GetUrl()
    var urls []string
    query.Find("a").Each(func(i int, s *goquery.Selection) {
        href, _ := s.Attr("href")
        urlHref, err := url.Parse(href)
        if err != nil {
            mlog.LogInst().LogError(err.Error())
            return
        }
        if !urlHref.IsAbs() {
            href = currentUrl + href
        }
        // Temporarily checked in crawler.go; this will be implemented in the pattern package.
        if checkMatchPattern(base, href) {
            visited, _ := rep.CheckIfVisited(href)
            if !visited {
                rep.VisitedNewNode(href)
                // urls = append(urls, href)
                urlstr.UploadURL(href)
            }
        }
    })
    // store content to db
    fmt.Printf("====store & commit : %s====\n\n\n", currentUrl)
    content, _ := query.Html()
    // content := ""
    storage.StoreInsert(collection, storage.StoreFormat{currentUrl, content})
    urlstr.CommitURL(currentUrl)
    releaseSlot <- 1
    url := GetOneURL()
    if url != "" {
        urls = append(urls, url)
    }
    p.AddTargetRequests(urls, "html")
}
Example 8: Process
func (this *MyProcessor) Process(p *page.Page) {
    if !p.IsSucc() {
        mlog.LogInst().LogError(p.Errormsg())
        return
    }
    u, err := url.Parse(p.GetRequest().GetUrl())
    if err != nil {
        mlog.LogInst().LogError(err.Error())
        return
    }
    if !strings.HasSuffix(u.Host, "jiexieyin.org") {
        return
    }
    // compile the filters once instead of once per link
    reJavascript := regexp.MustCompile(`^javascript:`)
    reLocal := regexp.MustCompile(`^#`)
    reMailto := regexp.MustCompile(`^mailto:`)
    var urls []string
    query := p.GetHtmlParser()
    query.Find("a").Each(func(i int, s *goquery.Selection) {
        href, _ := s.Attr("href")
        if reJavascript.MatchString(href) || reLocal.MatchString(href) || reMailto.MatchString(href) {
            return
        }
        // handle relative paths
        var absHref string
        urlHref, err := url.Parse(href)
        if err != nil {
            mlog.LogInst().LogError(err.Error())
            return
        }
        if !urlHref.IsAbs() {
            urlPrefix := p.GetRequest().GetUrl()
            absHref = urlPrefix + href
            urls = append(urls, absHref)
        } else {
            urls = append(urls, href)
        }
    })
    p.AddTargetRequests(urls, "html")
}
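Both this example and Example 7 absolutize relative links by plain string concatenation, which mishandles hrefs such as /foo or ../foo. A more robust alternative, sketched here with the standard library only (not part of go_spider), is url.ResolveReference:

package main

import (
    "fmt"
    "net/url"
)

// absolutize resolves href against base the way a browser would,
// handling "/path", "../path" and "page.html" forms correctly.
func absolutize(base, href string) (string, error) {
    b, err := url.Parse(base)
    if err != nil {
        return "", err
    }
    h, err := url.Parse(href)
    if err != nil {
        return "", err
    }
    return b.ResolveReference(h).String(), nil
}

func main() {
    abs, _ := absolutize("http://example.com/a/b.html", "../c.html")
    fmt.Println(abs) // http://example.com/c.html
}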
Example 9: pageProcess
// core processor
func (this *Spider) pageProcess(req *request.Request) {
    var p *page.Page
    defer func() {
        if err := recover(); err != nil { // do not let one failed request affect others
            if strerr, ok := err.(string); ok {
                mlog.LogInst().LogError(strerr)
            } else {
                mlog.LogInst().LogError("pageProcess error")
            }
        }
    }()
    // download the page, retrying up to 3 times on failure
    for i := 0; i < 3; i++ {
        this.sleep()
        p = this.pDownloader.Download(req)
        if p.IsSucc() {
            break
        }
    }
    if !p.IsSucc() { // still failing after retries: nothing to process
        return
    }
    this.pPageProcesser.Process(p)
    for _, req := range p.GetTargetRequests() {
        this.AddRequest(req)
    }
    // output
    if !p.GetSkip() {
        for _, pip := range this.pPiplelines {
            //fmt.Println("%v",p.GetPageItems().GetAll())
            pip.Process(p.GetPageItems(), this)
        }
    }
}
Example 10: Process
/*
** Parse the page: store follower info into DynamoDB and push the next URLs
** to crawl into SQS.
*/
func (this *MyPageProcesser) Process(p *page.Page) {
    if !p.IsSucc() {
        glog.Errorln(p.Errormsg())
        return
    }
    /*
    ** Print the crawled page.
    */
    glog.Infoln(p)
    query := p.GetHtmlParser()
    if Urls[i] == "weibo.cn" {
        i = i + 1
    }
    if UrlsLevel[i] == 0 {
        glog.Infoln("layer:", crawlUrl.Layer)
        this.w.GetNextPageUrl(query, p)
        this.w.GetFriendsUrl(query, p)
    } else if UrlsLevel[i] == 1 {
        this.w.GetFriendsInfo(query)
    }
    // if crawlUrl.Layer == 0 {
    // } else if crawlUrl.Layer == 1 {
    //     glog.Infoln("layer:", crawlUrl.Layer)
    //     this.w.GetNextPageUrl(query, p)
    //     this.w.GetFFUrl(query)
    // } else if crawlUrl.Layer == 2 {
    //     glog.Infoln("layer:", crawlUrl.Layer)
    //     this.w.GetFFInfo(query)
    // }
    header_num := rand.Intn(9)
    header_json := headerJson[header_num]
    i = i + 1
    p.AddTargetRequestWithHeaderFile(Urls[i], "html", header_json)
}
Example 11: Process
// Parse the html dom here and record the parse results we want in the Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (this *MyPageProcesser) Process(p *page.Page) {
    if !p.IsSucc() {
        println(p.Errormsg())
        return
    }
    var fetch_content string
    query := p.GetHtmlParser()
    content := p.GetBodyStr()
    reg := regexp.MustCompile(`class="([0-9a-zA-Z_-]*content[0-9a-zA-Z_-]*)"`)
    reg_res := reg.FindAllStringSubmatch(content, -1)
    class_content := make([]string, 0)
    for _, class := range reg_res {
        submatch := class[1]
        class_content = append(class_content, submatch)
    }
    removeDuplicate(&class_content)
    for _, class := range class_content {
        query.Find("." + class).Each(func(i int, s *goquery.Selection) {
            text := strings.Trim(s.Text(), " \t\n")
            text = strings.Replace(text, " ", "", -1)
            text = strings.Replace(text, "\n", "", -1)
            text = strings.Replace(text, "\t", "", -1)
            if text != "" {
                fetch_content = fetch_content + text
            }
        })
    }
    if fetch_content != "" {
        p.AddField("content", fetch_content)
    }
}
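removeDuplicate is called above but its definition is not part of the excerpt. A plausible sketch of such a helper (an assumption; the original may differ) deduplicates the slice in place while preserving first-seen order:

package demo

// removeDuplicate drops repeated strings from the slice in place,
// preserving first-seen order. Sketch of the helper the example assumes.
func removeDuplicate(list *[]string) {
    seen := make(map[string]bool)
    out := (*list)[:0] // reuse the backing array
    for _, s := range *list {
        if !seen[s] {
            seen[s] = true
            out = append(out, s)
        }
    }
    *list = out
}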
Example 12: Process
// Parse the html dom here and record the parse results we want in the Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (this *MyPageProcesser) Process(p *page.Page) {
    if !p.IsSucc() {
        println(p.Errormsg())
        return
    }
    crawok := false
    //query := p.GetHtmlParser()
    //var urls []string
    //fmt.Println(p.GetBodyStr())
    re := regexp.MustCompile(`<a href="(.*?)">(.*?)`)
    sectUrlsTemp := re.FindAllSubmatch([]byte(p.GetBodyStr()), -1)
    for _, url := range sectUrlsTemp {
        for _, url1 := range url {
            crawok = true
            http_index := strings.Index(string(url1), "http://shinichr.diandian.com")
            http_note := strings.Index(string(url1), "\"")
            http_quote := strings.Index(string(url1), "#")
            if http_index >= 0 && http_quote < 0 {
                if http_note > 0 && http_note < http_index {
                    continue
                }
                var http_url string
                if http_note <= 0 {
                    http_url = string(url1)[http_index:]
                } else {
                    http_url = string(url1)[http_index:http_note]
                }
                if this.visit_url[http_url] == 0 {
                    this.visit_url[http_url] = 1
                    fmt.Println("####unvisited:", http_url)
                    //fmt.Println("###AddTargetRequest:", http_url)
                    p.AddTargetRequest(http_url, "html")
                }
            }
        }
    }
    if !crawok {
        fmt.Println("crawl false:*****************", p.GetRequest().GetUrl())
        http_page := strings.Index(p.GetRequest().GetUrl(), "http://shinichr.diandian.com/page")
        http_post := strings.Index(p.GetRequest().GetUrl(), "http://shinichr.diandian.com/post")
        fmt.Println("http_page:", http_page, "http_post:", http_post)
        if http_page >= 0 || http_post >= 0 {
            //this.visit_url[p.GetRequest().GetUrl()] = 0
            p.AddTargetRequest(p.GetRequest().GetUrl(), "html")
        }
    }
    http_index := strings.Index(p.GetRequest().GetUrl(), "http://shinichr.diandian.com/post/")
    // rex, _ := regexp.Compile(`/`)
    //replaceurl := rex.ReplaceAllString(p.GetRequest().GetUrl(), ".")
    //fmt.Println("http_index=", http_index)
    //fmt.Println("replaceurl=", p.GetRequest().GetUrl()[http_index:], "....", http_index)
    if http_index >= 0 {
        // strip the 34-character prefix "http://shinichr.diandian.com/post/"
        cuturl := p.GetRequest().GetUrl()[34:]
        //fmt.Println("replaceurl=", cuturl)
        rex, _ := regexp.Compile(`/`)
        replaceurl := rex.ReplaceAllString(cuturl, ".")
        filedir := fmt.Sprintf("/home/shinichr/diandian_post/%s", replaceurl)
        fout, err := os.Create(filedir)
        if err != nil {
            fmt.Println(filedir, err)
            return
        }
        defer fout.Close()
        src := p.GetBodyStr()
        re, _ := regexp.Compile(`<[\S\s]+?>`)
        src = re.ReplaceAllStringFunc(src, strings.ToLower)
        // strip <style> blocks
        re, _ = regexp.Compile(`<style[\S\s]+?</style>`)
        src = re.ReplaceAllString(src, "")
        // strip <script> blocks
        re, _ = regexp.Compile(`<script[\S\s]+?</script>`)
        src = re.ReplaceAllString(src, "")
        // strip all remaining HTML tags, replacing each with a newline
        re, _ = regexp.Compile(`<[\S\s]+?>`)
        src = re.ReplaceAllString(src, "\n")
        // collapse consecutive whitespace into single newlines
        re, _ = regexp.Compile(`\s{2,}`)
        src = re.ReplaceAllString(src, "\n")
        //......... the remaining code of this example is omitted here .........