當前位置: 首頁>>代碼示例>>Python>>正文


Python BeautifulSoup.BeautifulSoup方法代碼示例

本文整理匯總了Python中BeautifulSoup.BeautifulSoup方法的典型用法代碼示例。如果您正苦於以下問題:Python BeautifulSoup.BeautifulSoup方法的具體用法?Python BeautifulSoup.BeautifulSoup怎麽用?Python BeautifulSoup.BeautifulSoup使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在BeautifulSoup的用法示例。


在下文中一共展示了BeautifulSoup.BeautifulSoup方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: extract_context

# 需要導入模塊: import BeautifulSoup [as 別名]
# 或者: from BeautifulSoup import BeautifulSoup [as 別名]
def extract_context(html, url):
    """Crawl one fetched page: persist its visible text and return outbound links.

    Args:
        html: Raw HTML of the fetched page.
        url: URL the HTML was fetched from (used as the Content record key).

    Returns:
        List of absolute http:// URLs found in anchor tags on the page.
    """
    soup = BeautifulSoup(html)
    # Insert into Content (under this domain)
    texts = soup.findAll(text=True)
    try:
        Content.objects.create(
            url=url,
            title=soup.title.string,
            summary=helpers.strip_tags(" \n".join(filter(visible, texts)))[:4000],
            last_crawled_at=datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
        )
    except IntegrityError:
        # Duplicate URL: the page was crawled before; log and keep going.
        # FIX: original called println(), which does not exist in Python.
        print('%s - already existed in Content' % url)
    # NOTE: a stray `soup.prettify()` call (return value discarded, no side
    # effect) was removed here.
    return [str(anchor['href'])
            for anchor in soup.findAll('a', attrs={'href': re.compile("^http://")}) if anchor['href']]
開發者ID:pixlie,項目名稱:oxidizr,代碼行數:18,代碼來源:crawl.py

示例2: parse

# 需要導入模塊: import BeautifulSoup [as 別名]
# 或者: from BeautifulSoup import BeautifulSoup [as 別名]
def parse(self, html):
    """Parse scholar result HTML and emit article instances.

    Builds a soup from *html*, extracts page-level attributes, then walks
    every result container, cleaning each parsed article and forwarding
    those with a title to the handle_article callback.
    """
    self.soup = BeautifulSoup(html)

    # Page-wide (non per-article) attributes come first.
    self._parse_globals()

    # Then each individual result block.
    for result_div in self.soup.findAll(ScholarArticleParser._tag_results_checker):
        self._parse_article(result_div)
        self._clean_article()
        if not self.article['title']:
            continue
        self.handle_article(self.article)
開發者ID:dnlcrl,項目名稱:PyScholar,代碼行數:19,代碼來源:parser.py

示例3: list_of_all_href

# 需要導入模塊: import BeautifulSoup [as 別名]
# 或者: from BeautifulSoup import BeautifulSoup [as 別名]
def list_of_all_href(self, html):
    '''
    Return [link, song-name] pairs for every download anchor found in the
    mr-jatt page HTML.
    '''
    soup = BeautifulSoup(html)
    results = []
    anchors = soup.findAll('a', 'touch')
    # The last anchor on the page is navigation, not a song - skip it.
    for anchor in anchors[:-1]:
        href = anchor.get('href')
        label = str(anchor)
        # Strip the surrounding markup so only the song title remains.
        label = re.sub(r'<a.*/>|<span.*">|</span>|</a>|<a.*html">|<font.*">|</font>', '', label)
        # Drop any leading track number such as "12.".
        label = re.sub(r'^[0-9]+\.', '', label)
        results.append([href, label])
    return results
開發者ID:ankitmathur3193,項目名稱:song-cli,代碼行數:19,代碼來源:MrJattParser.py

示例4: parse_checkerproxy

# 需要導入模塊: import BeautifulSoup [as 別名]
# 或者: from BeautifulSoup import BeautifulSoup [as 別名]
def parse_checkerproxy(self, html):
    ''' Only get elite proxies from checkerproxy '''
    elite_ips = []
    soup = BeautifulSoup(html)
    for row in soup.findAll('tr'):
        # Proxy rows in the results table have exactly 19 child nodes.
        if len(row) != 19:
            continue
        proxy_addr = None
        has_ip = False
        is_elite = False
        for cell in row.findAll('td'):
            if ':' in cell.text:
                match = re.match('(\d{1,3}\.){3}\d{1,3}:\d{1,5}', cell.text)
                if match:
                    proxy_addr = match.group()
                # A ':' cell only counts once a full ip:port was captured.
                has_ip = proxy_addr is not None
            if 'Elite' in cell.text:
                is_elite = True
            if has_ip and is_elite:
                elite_ips.append(str(proxy_addr))
                break
    return elite_ips
開發者ID:DanMcInerney,項目名稱:get_proxy,代碼行數:26,代碼來源:get_proxy.py

示例5: sanitize_html

# 需要導入模塊: import BeautifulSoup [as 別名]
# 或者: from BeautifulSoup import BeautifulSoup [as 別名]
def sanitize_html(value):
    """Whitelist-sanitize an HTML fragment.

    Strips HTML comments and hides any tag not listed in ALLOWED_TAGS, drops
    attributes other than href/src/target/alt, removes 'javascript:' text,
    and converts newlines into paragraph breaks. A fragment containing a
    <div class="not-sanitize"> marker opts out and is only re-rendered.

    Returns '' for falsy input, otherwise a decoded HTML string.
    """
    valid_tags = ALLOWED_TAGS.split()
    valid_attrs = 'href src target alt'.split()

    if not value:
        return ''

    soup = BeautifulSoup(value)

    # The "not-sanitize" marker div opts the whole fragment out of cleaning.
    if not (soup.find('div', 'not-sanitize')):
        # Remove HTML comments entirely.
        for comment in soup.findAll(text=lambda text: isinstance(text, Comment)):
            comment.extract()
        for tag in soup.findAll(True):
            if tag.name not in valid_tags:
                # hidden=True makes BeautifulSoup render the tag's children
                # without the tag itself.
                tag.hidden = True
            # Keep only the whitelisted attributes.
            tag.attrs = [(attr, val) for attr, val in tag.attrs
                         if attr in valid_attrs]
        # Wrap in <p>, strip 'javascript:' text, and turn newlines into
        # paragraph breaks.
        return '<p>' + soup.renderContents().decode('utf8').replace('javascript:', '').replace("\n", '</p><p>') + '</p>'
    return soup.renderContents().decode('utf8')
開發者ID:znick,項目名稱:anytask,代碼行數:21,代碼來源:sanitize_html.py

示例6: get_soup

# 需要導入模塊: import BeautifulSoup [as 別名]
# 或者: from BeautifulSoup import BeautifulSoup [as 別名]
def get_soup(content):
    """Parse Addic7ed page content; return the soup, or False when unusable.

    Validates that the fetched page is a real Addic7ed page (and not, e.g.,
    an index page served after a 404) by checking for the expected <title>.
    """
    pattern = "subtitles from the source! - Addic7ed.com"
    try:
        soup = BeautifulSoup(content)
        title = str(soup.findAll("title")[0])
        if title.find(pattern) > -1:
            return soup
        else:
            log("bad page, maybe index after 404")
            return False
    # FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    except Exception:
        log("badly formatted content")
        if self_notify:
            xbmc.executebuiltin((u'Notification(%s,%s,%s,%s)' % (__addonname__, __language__(30009), 750, __icon__)).encode('utf-8', 'ignore'))
        return False
開發者ID:skylex,項目名稱:xbmc-betaseries,代碼行數:18,代碼來源:service.py

示例7: get_soup

# 需要導入模塊: import BeautifulSoup [as 別名]
# 或者: from BeautifulSoup import BeautifulSoup [as 別名]
def get_soup(content):
    """Parse TVsubtitles page content; return the soup, or False when unusable.

    Validates that the fetched page is a real TVsubtitles page (and not,
    e.g., an index page served after a 404) by checking for the expected
    <title>.
    """
    pattern = "TVsubtitles.net - "
    try:
        soup = BeautifulSoup(content)
        title = str(soup.findAll("title")[0])
        if title.find(pattern) > -1:
            return soup
        else:
            log("bad page, maybe index after 404")
            return False
    # FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    except Exception:
        log("badly formatted content")
        if self_notify:
            xbmc.executebuiltin((u'Notification(%s,%s,%s,%s)' % (__addonname__, __language__(30009), 750, __icon__)).encode('utf-8', 'ignore'))
        return False
開發者ID:skylex,項目名稱:xbmc-betaseries,代碼行數:18,代碼來源:service.py

示例8: ubuntu_url

# 需要導入模塊: import BeautifulSoup [as 別名]
# 或者: from BeautifulSoup import BeautifulSoup [as 別名]
def ubuntu_url(start_date, end_date):
    """
    Args:
        start_date (date object): Starting date from which logs need to be fetched
        end_date (date object): Last date for which logs need to be fetched
    Returns:
        Yields channel name, current_date, and url at which log for returned
        channel and current_date is present.
    """
    for current_date in rrule(freq=DAILY, dtstart=start_date, until=end_date):
        index_url = UBUNTU_ENDPOINT.format(
            current_date.year, month=current_date.month, day=current_date.day)

        page = send_request(index_url)
        soup = BeautifulSoup(page)

        # Every .txt link on the day's index page is one channel's log.
        for link in soup.findAll(href=re.compile(".txt")):
            channel = link.string
            # The channel URL wants the name without its leading '#'.
            yield channel, current_date, UBUNTU_CHANNEL_ENDPOINT.format(
                current_date.year, month=current_date.month,
                day=current_date.day, channel=channel[1:])
開發者ID:prasadtalasila,項目名稱:IRCLogParser,代碼行數:24,代碼來源:log_download.py

示例9: run

# 需要導入模塊: import BeautifulSoup [as 別名]
# 或者: from BeautifulSoup import BeautifulSoup [as 別名]
def run(self):
    """Scan the artifact on urlvoid.com and record any blacklist hits."""
    scan_url = 'http://urlvoid.com/scan/%s/' % self.artifact['name']

    try:
        status, response = get(scan_url, headers=self.headers)
        if not status:
            return

        page = BeautifulSoup(response.text)

        # An "info" callout means a clean report - nothing to record.
        if page.findAll('div', attrs={'class': 'bs-callout bs-callout-info'}):
            return

        # A "warning" callout lists the blacklists that flagged the URL.
        if page.findAll('div', attrs={'class': 'bs-callout bs-callout-warning'}):
            self.artifact['data']['urlvoid'] = {}
            for alert_img in page.findAll('img', alt='Alert'):
                site_name = alert_img.parent.parent.td.text.lstrip()
                report_url = alert_img.parent.a['href']
                self.artifact['data']['urlvoid'][site_name] = report_url

    except Exception as err:
        warning('Caught exception in module (%s)' % str(err))
開發者ID:InQuest,項目名稱:omnibus,代碼行數:23,代碼來源:urlvoid.py

示例10: ip

# 需要導入模塊: import BeautifulSoup [as 別名]
# 或者: from BeautifulSoup import BeautifulSoup [as 別名]
def ip(self):
        # Look up DNS records for the artifact's IP on bgp.he.net.
        url = 'http://bgp.he.net/ip/%s#_dns' % self.artifact['name']
        headers = {'User-Agent': 'OSINT Omnibus (https://github.com/InQuest/Omnibus)'}

        try:
            status, response = get(url, headers=headers)

            if status:
                result = []
                data = BeautifulSoup(response.text)

                # Collect the text of the entries in the hidden "dns" tab.
                # NOTE(review): `result` is built but never returned or stored
                # anywhere; sibling modules persist findings into
                # self.artifact['data'] - confirm whether this lookup should
                # do the same.
                for item in data.findAll(attrs={'id': 'dns', 'class': 'tabdata hidden'}):
                    result.append(item.text.strip())

        except Exception as err:
            warning('Caught exception in module (%s)' % str(err))
開發者ID:InQuest,項目名稱:omnibus,代碼行數:18,代碼來源:he.py

示例11: run

# 需要導入模塊: import BeautifulSoup [as 別名]
# 或者: from BeautifulSoup import BeautifulSoup [as 別名]
def run(self):
    """Scan the artifact's IP on ipvoid.com and record any blacklist hits."""
    scan_url = 'http://www.ipvoid.com/scan/%s/' % self.artifact['name']

    try:
        status, response = get(scan_url, headers=self.headers)
        if not status:
            return

        page = BeautifulSoup(response.text)

        # A green "success" label means the IP is clean - record nothing.
        if page.findAll('span', attrs={'class': 'label label-success'}):
            return

        # A red "danger" label means at least one blacklist flagged the IP.
        if page.findAll('span', attrs={'class': 'label label-danger'}):
            for alert_img in page.findAll('img', alt='Alert'):
                site_name = alert_img.parent.parent.td.text.lstrip()
                report_url = alert_img.parent.a['href']
                # Mirrors the original: each hit overwrites the previous one,
                # so only the last {site: url} pair is kept.
                self.artifact['data']['ipvoid'] = {site_name: report_url}
    except Exception as err:
        warning('Caught exception in module (%s)' % str(err))
開發者ID:InQuest,項目名稱:omnibus,代碼行數:21,代碼來源:ipvoid.py

示例12: update_planet_fleet

# 需要導入模塊: import BeautifulSoup [as 別名]
# 或者: from BeautifulSoup import BeautifulSoup [as 別名]
def update_planet_fleet(self, planet):
    """Refresh planet.ships from the counts shown on the fleet page.

    Fetches the fleet page for *planet* and reads, for every ship type in
    self.SHIPS, how many are available; defaults to 0 when the ship's
    button is missing or its count cannot be parsed.
    """
    resp = self.br.open(self._get_url('fleet', planet))
    soup = BeautifulSoup(resp)
    ships = {}
    for ship_name, button_id in self.SHIPS.iteritems():
        try:
            button = soup.find(id='button' + button_id)
            # Counts use '.' as a thousands separator, e.g. "1.234".
            available = int(button.find('span', 'textlabel').nextSibling.replace('.', ''))
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        except Exception:
            available = 0
        ships[ship_name] = available

    #self.logger.info('Updating %s fleet' % planet)
    #self.logger.info('%s' % fleet)
    planet.ships = ships
開發者ID:r4fek,項目名稱:ogame-bot,代碼行數:18,代碼來源:bot.py

示例13: get_player_status

# 需要導入模塊: import BeautifulSoup [as 別名]
# 或者: from BeautifulSoup import BeautifulSoup [as 別名]
def get_player_status(self, destination, origin_planet=None):
    """Return the owner's name and inactivity flag for a target planet.

    Args:
        destination: Coordinate string 'galaxy:system:position'.
        origin_planet: Planet to browse the galaxy view from; defaults to
            the closest planet to *destination*.

    Returns:
        dict with 'name' and 'inactive' keys, or None when destination is
        falsy.
    """
    if not destination:
        return

    status = {}
    origin_planet = origin_planet or self.get_closest_planet(destination)
    galaxy, system, position = destination.split(':')

    url = self._get_url('galaxyCnt', origin_planet)
    data = urlencode({'galaxy': galaxy, 'system': system})
    resp = self.br.open(url, data=data)
    soup = BeautifulSoup(resp)

    # FIX: removed a stray `soup.find(id='galaxytable')` call whose result
    # was discarded - it had no effect.
    planets = soup.findAll('tr', {'class': 'row'})
    # Galaxy rows are 1-indexed by planet position.
    target_planet = planets[int(position) - 1]
    name_el = target_planet.find('td', 'playername')
    status['name'] = name_el.find('span').text

    status['inactive'] = 'inactive' in name_el.get('class', '')
    return status
開發者ID:r4fek,項目名稱:ogame-bot,代碼行數:23,代碼來源:bot.py

示例14: spider_image

# 需要導入模塊: import BeautifulSoup [as 別名]
# 或者: from BeautifulSoup import BeautifulSoup [as 別名]
def spider_image(url):
    """Fetch a wallbase page and return its wallpaper image URL and size."""
    headers = {'User-Agent': "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"}
    request = urllib2.Request(url, headers=headers)
    response = urllib2.urlopen(request)
    soup = BeautifulSoup(response.read())

    result = {}
    # The wallpaper <img> carries a class containing "wall"; first match wins.
    wall_imgs = soup.findAll("img", attrs={"class": re.compile("wall")}, limit=1)
    result['url'] = wall_imgs[0]['src']

    # The size label is the "l1" div sitting directly inside an anchor.
    for size_div in soup.findAll("div", "l1"):
        if size_div.parent.name == "a":
            result['size'] = size_div.text
            break

    return result
開發者ID:zengqiu,項目名稱:spider,代碼行數:19,代碼來源:wallbase.py

示例15: getLinks

# 需要導入模塊: import BeautifulSoup [as 別名]
# 或者: from BeautifulSoup import BeautifulSoup [as 別名]
def getLinks():
  """Fetch example.com and append every anchor href to the global linkArray.

  Prints the running length of linkArray after each append.
  """
  page = urllib2.urlopen('http://www.example.com')
  soup = BeautifulSoup(page.read())
  anchors = soup.findAll('a')
  for anchor in anchors:
    linkArray.append(anchor.get('href'))
    print(len(linkArray))
開發者ID:PacktPublishing,項目名稱:Learning-Concurrency-in-Python,代碼行數:8,代碼來源:ioBottleneck2.py


注:本文中的BeautifulSoup.BeautifulSoup方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。