

Golang page.Page Class Code Examples

This article collects typical usage examples of the Golang class github.com/hu17889/go_spider/core/common/page.Page. If you have been wondering what the Page class does, how to use it, or where to find real usage examples, the curated class examples below should help.


The 15 Page class code examples below are sorted by popularity by default. Upvoting the examples you find useful helps the site recommend better Golang code samples.
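
Before the individual examples, here is a minimal end-to-end sketch of how a page.Page instance is typically consumed inside a go_spider page processer. The Page methods used (IsSucc, Errormsg, GetHtmlParser, AddField, SetSkip, AddTargetRequests) are taken from the examples below; the PageProcesser interface (Process/Finish) and the spider/pipeline wiring in main are assumptions based on the go_spider project README, so treat this as an orientation sketch rather than authoritative API documentation.

package main

import (
	"strings"

	"github.com/PuerkitoBio/goquery"
	"github.com/hu17889/go_spider/core/common/page"
	"github.com/hu17889/go_spider/core/pipeline"
	"github.com/hu17889/go_spider/core/spider"
)

// MinimalProcesser exercises the page.Page methods that appear
// throughout the examples in this article.
type MinimalProcesser struct{}

// Process is called once per downloaded page.
func (this *MinimalProcesser) Process(p *page.Page) {
	if !p.IsSucc() { // download or parse failed
		println(p.Errormsg())
		return
	}

	query := p.GetHtmlParser() // *goquery.Document

	// Extract one field; an empty result marks the page to be skipped
	// by the pipeline.
	title := strings.Trim(query.Find("h1").Text(), " \t\n")
	if title == "" {
		p.SetSkip(true)
	}
	p.AddField("title", title)

	// Collect follow-up links; they will be crawled by other goroutines.
	var urls []string
	query.Find("a").Each(func(i int, s *goquery.Selection) {
		if href, ok := s.Attr("href"); ok {
			urls = append(urls, href)
		}
	})
	p.AddTargetRequests(urls, "html")
}

// Finish runs once when the spider stops; it completes the assumed
// PageProcesser interface.
func (this *MinimalProcesser) Finish() {}

func main() {
	// Wiring assumed from the go_spider README: seed one URL, print
	// collected fields to the console, run three worker goroutines.
	spider.NewSpider(&MinimalProcesser{}, "MinimalTask").
		AddUrl("https://github.com/hu17889/go_spider", "html").
		AddPipeline(pipeline.NewPipelineConsole()).
		SetThreadnum(3).
		Run()
}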

Example 1: getName

func (this *PlantProcesser) getName(query *goquery.Document, p *page.Page) {

	name := query.Find(".lemmaWgt-lemmaTitle-title").Find("h1").Text()
	name = strings.Trim(name, " \t\n")
	p.AddField("name", name)

}
Developer: liulnn, Project: plant-spider, Lines: 7, Source: processer.go

Example 2: connectByHttp

// choose the http GET/POST method to download
func connectByHttp(p *page.Page, req *request.Request) (*http.Response, error) {
	client := &http.Client{
		CheckRedirect: req.GetRedirectFunc(),
	}

	httpreq, err := http.NewRequest(req.GetMethod(), req.GetUrl(), strings.NewReader(req.GetPostdata()))
	if err != nil {
		mlog.LogInst().LogError(err.Error())
		p.SetStatus(true, err.Error())
		return nil, err
	}

	if header := req.GetHeader(); header != nil {
		httpreq.Header = header
	}

	if cookies := req.GetCookies(); cookies != nil {
		for i := range cookies {
			httpreq.AddCookie(cookies[i])
		}
	}

	var resp *http.Response
	if resp, err = client.Do(httpreq); err != nil {
		if e, ok := err.(*url.Error); ok && e.Err != nil && e.Err.Error() == "normal" {
			//  normal
		} else {
			mlog.LogInst().LogError(err.Error())
			p.SetStatus(true, err.Error())
			//fmt.Printf("client do error %v \r\n", err)
			return nil, err
		}
	}

	return resp, nil
}
Developer: CrocdileChan, Project: go_spider, Lines: 31, Source: downloader_http.go

Example 3: Process

// Parse the html dom here and record the parse results we want in the Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (this *MyPageProcesser) Process(p *page.Page) {
	query := p.GetHtmlParser()
	var urls []string
	query.Find("#threadlisttableid tbody").Each(func(i int, s *goquery.Selection) {
		if s.HasClass("emptb") {
			return
		}
		href, _ := s.Find("tbody tr .icn a").Attr("href")
		urls = append(urls, href)
	})

	// these urls will be saved and crawled by other goroutines.
	p.AddTargetRequests(urls, "html")

	title := query.Find("#thread_subject").Text()
	title = strings.Trim(title, "\t\n\r")
	author := query.Find("#postlist div .authi").Eq(0).Text()
	author = strings.Trim(author, "\t\r\n")

	if title == "" || author == "" {
		p.SetSkip(true)
	}

	p.AddField("title", title)
	p.AddField("author", author)
}
Developer: tuyuwei, Project: test, Lines: 28, Source: main.go

Example 4: downloadText

func (this *HttpDownloader) downloadText(p *page.Page, req *request.Request) *page.Page {
	p, destbody := this.downloadFile(p, req)
	if !p.IsSucc() {
		return p
	}

	p.SetBodyStr(destbody).SetStatus(false, "")
	return p
}
Developer: CrocdileChan, Project: go_spider, Lines: 9, Source: downloader_http.go

Example 5: downloadHtml

func (this *HttpDownloader) downloadHtml(p *page.Page, req *request.Request) *page.Page {
	var err error
	p, destbody := this.downloadFile(p, req)
	//fmt.Printf("Destbody %v \r\n", destbody)
	if !p.IsSucc() {
		//fmt.Print("Page error \r\n")
		return p
	}
	bodyReader := bytes.NewReader([]byte(destbody))

	var doc *goquery.Document
	if doc, err = goquery.NewDocumentFromReader(bodyReader); err != nil {
		mlog.LogInst().LogError(err.Error())
		p.SetStatus(true, err.Error())
		return p
	}

	var body string
	if body, err = doc.Html(); err != nil {
		mlog.LogInst().LogError(err.Error())
		p.SetStatus(true, err.Error())
		return p
	}

	p.SetBodyStr(body).SetHtmlParser(doc).SetStatus(false, "")

	return p
}
Developer: CrocdileChan, Project: go_spider, Lines: 28, Source: downloader_http.go

Example 6: Process

func (this *PlantProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}
	query := p.GetHtmlParser()

	if !this.isPlant(query, p) {
		p.SetSkip(true)
	}

	this.getName(query, p)
	this.getSummary(query, p)
	this.getCatalog(query, p)
	p.AddTargetRequests(this.getUrls(query), "html")
}
Developer: liulnn, Project: plant-spider, Lines: 16, Source: processer.go

Example 7: Process

// Parse the html dom here and record the parse results we want in the Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (this *MyPageProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}
	query := p.GetHtmlParser()
	currentUrl := p.GetRequest().GetUrl()
	var urls []string
	query.Find("a").Each(func(i int, s *goquery.Selection) {
		href, _ := s.Attr("href")
		urlHref, err := url.Parse(href)
		if err != nil {
			mlog.LogInst().LogError(err.Error())
			return
		}
		if !urlHref.IsAbs() {
			href = currentUrl + href
		}
		// Temporary check done in crawler.go; it will be implemented in the pattern package.

		if checkMatchPattern(base, href) {
			visited, _ := rep.CheckIfVisited(href)
			if !visited {
				rep.VisitedNewNode(href)
				// urls = append(urls, href)
				urlstr.UploadURL(href)
			}
		}
	})

	// store content to db

	fmt.Printf("====store & commit : %s====\n\n\n", currentUrl)
	content, _ := query.Html()
	// content := ""
	storage.StoreInsert(collection, storage.StoreFormat{currentUrl, content})
	urlstr.CommitURL(currentUrl)
	releaseSlot <- 1

	nextURL := GetOneURL()
	if nextURL != "" {
		urls = append(urls, nextURL)
	}

	p.AddTargetRequests(urls, "html")

}
Developer: plutoshe, Project: webCrawler, Lines: 49, Source: crawler.go

Example 8: Process

// Parse the html dom here and record the parse results we want from the crawl.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (this *MyPageProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}

	query := p.GetHtmlParser()

	name := query.Find(".lemmaTitleH1").Text()
	name = strings.Trim(name, " \t\n")

	summary := query.Find(".card-summary-content .para").Text()
	summary = strings.Trim(summary, " \t\n")

	// the entity we want to save by Pipeline
	p.AddField("name", name)
	p.AddField("summary", summary)
}
Developer: CrocdileChan, Project: go_spider, Lines: 20, Source: main.go

Example 9: TestDownloadJson

func TestDownloadJson(t *testing.T) {
	var req *request.Request
	req = request.NewRequest("http://live.sina.com.cn/zt/api/l/get/finance/globalnews1/index.htm?format=json&id=23521&pagesize=4&dire=f&dpc=1", "json")

	var dl downloader.Downloader
	dl = downloader.NewHttpDownloader()

	var p *page.Page
	p = dl.Download(req)

	var jsonMap interface{}
	jsonMap = p.GetJsonMap()
	fmt.Printf("%v", jsonMap)

	//fmt.Println(doc)
	//body := p.GetBodyStr()
	//fmt.Println(body)

}
Developer: w3hacker, Project: go_spider, Lines: 19, Source: downloader_test.go

Example 10: TestDownloadHtml

func TestDownloadHtml(t *testing.T) {
	//return
	//request := request.NewRequest("http://live.sina.com.cn/zt/api/l/get/finance/globalnews1/index.htm?format=json&callback=t13975294&id=23521&pagesize=45&dire=f&dpc=1")
	var req *request.Request
	req = request.NewRequest("http://live.sina.com.cn/zt/l/v/finance/globalnews1/", "html", "", "GET", "", nil, nil, nil, nil)

	var dl downloader.Downloader
	dl = downloader.NewHttpDownloader()

	var p *page.Page
	p = dl.Download(req)

	var doc *goquery.Document
	doc = p.GetHtmlParser()
	//fmt.Println(doc)
	//body := p.GetBodyStr()
	//fmt.Println(body)

	var s *goquery.Selection
	s = doc.Find("body")
	if s.Length() < 1 {
		t.Error("html parse failed!")
	}

	/*
	   doc, err := goquery.NewDocument("http://live.sina.com.cn/zt/l/v/finance/globalnews1/")
	   if err != nil {
	       fmt.Printf("%v",err)
	   }
	   s := doc.Find("meta");
	   fmt.Println(s.Length())

	   resp, err := http.Get("http://live.sina.com.cn/zt/l/v/finance/globalnews1/")
	   if err != nil {
	       fmt.Printf("%v",err)
	   }
	   defer resp.Body.Close()
	   doc, err = goquery.NewDocumentFromReader(resp.Body)
	   s = doc.Find("meta");
	   fmt.Println(s.Length())
	*/
}
Developer: CrocdileChan, Project: go_spider, Lines: 42, Source: downloader_test.go

Example 11: TestCharSetChange

func TestCharSetChange(t *testing.T) {
	var req *request.Request
	//req = request.NewRequest("http://stock.finance.sina.com.cn/usstock/api/jsonp.php/t/US_CategoryService.getList?page=1&num=60", "jsonp")
	req = request.NewRequest("http://soft.chinabyte.com/416/13164916.shtml", "html", "", "GET", "", nil, nil, nil, nil)

	var dl downloader.Downloader
	dl = downloader.NewHttpDownloader()

	var p *page.Page
	p = dl.Download(req)

	//hp := p.GetHtmlParser()
	//fmt.Printf("%v", jsonMap)

	//fmt.Println(doc)
	body := p.GetBodyStr()
	fmt.Println(body)

}
Developer: CrocdileChan, Project: go_spider, Lines: 20, Source: downloader_test.go

Example 12: downloadJson

func (this *HttpDownloader) downloadJson(p *page.Page, req *request.Request) *page.Page {
	var err error
	p, destbody := this.downloadFile(p, req)
	if !p.IsSucc() {
		return p
	}

	var body []byte
	body = []byte(destbody)
	mtype := req.GetResponceType()
	if mtype == "jsonp" {
		tmpstr := util.JsonpToJson(destbody)
		body = []byte(tmpstr)
	}

	var r *simplejson.Json
	if r, err = simplejson.NewJson(body); err != nil {
		mlog.LogInst().LogError(string(body) + "\t" + err.Error())
		p.SetStatus(true, err.Error())
		return p
	}

	// json result
	p.SetBodyStr(string(body)).SetJson(r).SetStatus(false, "")

	return p
}
Developer: CrocdileChan, Project: go_spider, Lines: 27, Source: downloader_http.go

Example 13: Process

func (this SitePageProcesser) Process(p *page.Page) {
	fmt.Println("Site Page Processer")

	if p.GetUrlTag() == "index" {
		query := p.GetHtmlParser()
		query.Find("ul[class='audioList fontYaHei'] li a").Each(func(i int, s *goquery.Selection) {
			strTitle, _ := s.Attr("title")
			strUrl, _ := s.Attr("data-url")

			if !IsFileExist(strTitle) {
				strFileName := fmt.Sprintf("%s.mp3", strTitle)
				fmt.Println(strFileName)
				cmd := exec.Command("/usr/local/bin/wget", strUrl, "-O", strFileName)
				// Run the command once and capture stdout+stderr; calling
				// Output after Run on the same Cmd would fail with
				// "exec: already started".
				out, err := cmd.CombinedOutput()
				if err != nil {
					fmt.Println(err)
				}
				fmt.Println(string(out))
			}
		})
	}
}
Developer: rpoverflow, Project: LiZhiFMCrawler, Lines: 23, Source: sitePageProcess.go
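
A side note on Example 13's design: shelling out to /usr/local/bin/wget ties the crawler to an external binary at a hard-coded path. A dependency-free alternative, sketched below purely as an illustration (the saveFile helper is hypothetical, not part of go_spider or the project above), streams the audio file to disk with the standard library.

package main

import (
	"io"
	"net/http"
	"os"
)

// saveFile downloads url into path using only the standard library,
// avoiding the external wget dependency used in Example 13.
func saveFile(url, path string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	out, err := os.Create(path)
	if err != nil {
		return err
	}
	defer out.Close()

	_, err = io.Copy(out, resp.Body) // stream the response body to disk
	return err
}

In Example 13 this would replace the exec.Command block with a single saveFile(strUrl, strFileName) call.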

Example 14: Process

// Parse the html dom here and record the parse results we want in the Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (this *MyPageProcesser) Process(p *page.Page) {
	query := p.GetHtmlParser()
	var urls []string
	query.Find("h3[class='repo-list-name'] a").Each(func(i int, s *goquery.Selection) {
		href, _ := s.Attr("href")
		urls = append(urls, "http://github.com/"+href)
	})
	// these urls will be saved and crawled by other goroutines.
	p.AddTargetRequests(urls, "html")

	name := query.Find(".entry-title .author").Text()
	name = strings.Trim(name, " \t\n")
	repository := query.Find(".entry-title .js-current-repository").Text()
	repository = strings.Trim(repository, " \t\n")
	//readme, _ := query.Find("#readme").Html()
	if name == "" {
		p.SetSkip(true)
	}
	// the entity we want to save by Pipeline
	p.AddField("author", name)
	p.AddField("project", repository)
	//p.AddField("readme", readme)
}
Developer: w3hacker, Project: go_spider, Lines: 25, Source: github_repo_page_processor.go

Example 15: Process

// Parse the html dom here and record the parse results we want in the Page.
// Package goquery (http://godoc.org/github.com/PuerkitoBio/goquery) is used to parse html.
func (this *MyPageProcesser) Process(p *page.Page) {
	if !p.IsSucc() {
		println(p.Errormsg())
		return
	}
	var fetch_content string
	query := p.GetHtmlParser()
	content := p.GetBodyStr()
	reg := regexp.MustCompile(`class="([0-9a-zA-Z_-]*content[0-9a-zA-Z_-]*)"`)
	reg_res := reg.FindAllStringSubmatch(content, -1)
	class_content := make([]string, 0)
	for _, class := range reg_res {
		submatch := class[1]
		class_content = append(class_content, submatch)
	}
	removeDuplicate(&class_content)

	for _, class := range class_content {

		query.Find("." + class).Each(func(i int, s *goquery.Selection) {
			text := strings.Trim(s.Text(), " \t\n")
			text = strings.Replace(text, " ", "", -1)
			text = strings.Replace(text, "\n", "", -1)
			text = strings.Replace(text, "\t", "", -1)

			if text != "" {
				fetch_content = fetch_content + text
			}
		})
	}

	if fetch_content != "" {
		p.AddField("content", fetch_content)
	}

}
Developer: wadee, Project: go_proj, Lines: 38, Source: website_crawler.go


Note: The github.com/hu17889/go_spider/core/common/page.Page class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and the copyright of the source code belongs to the original authors. Please consult each project's License before distributing or using the code, and do not reproduce this article without permission.