

Python html.xpath Method Code Examples

This article collects typical usage examples of the lxml.html.xpath method in Python. If you are wondering how exactly the html.xpath method is used, how to call it, or where to find real-world examples, the curated code examples below may help. You can also explore further usage examples of lxml.html, the module this method belongs to.


A total of 15 code examples of the html.xpath method are shown below, sorted by popularity by default.
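Before looking at the individual examples, a minimal sketch of the underlying pattern may be useful. The snippet below is purely illustrative (the HTML string and variable names are invented) and shows how lxml.html parses markup and how xpath() returns either elements or plain strings depending on the expression.

import lxml.html
from lxml import etree

page = lxml.html.fromstring('<html><body><div id="a"><p>hello</p></div></body></html>')
elements = page.xpath('//div[@id="a"]/p')             # list of HtmlElement objects
texts = page.xpath('//div[@id="a"]/p/text()')         # list of strings: ['hello']
markup = [etree.tostring(node) for node in elements]  # each node serialized back to bytes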

Example 1: parseXMLxpathSearch

# Required module: from lxml import html [as alias]
# Or: from lxml.html import xpath [as alias]
# (this example also uses: from lxml import etree)
def parseXMLxpathSearch(xml_source, xpathString):
#---------------------------------------------------------------------------------

    return_values = []

    try:
        root = etree.XML(xml_source)

        data_points = root.xpath(xpathString)
    
        for data in data_points:
            return_values.append(etree.tostring(data))
            data.clear()

    except Exception:
        # swallow parse/xpath errors and return whatever was collected
        pass

    return return_values

#---------------------------------------------------------------------------------
# parse XML and return value asked (designed for errors via stdout) 
Developer: kenb123, Project: Basic-Expression-Lexicon-Variation-Algorithms-BELVA, Lines: 23, Source: belvaParseXML.py
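As a rough illustration of how the helper above might be called (the XML string is invented, and the output assumes etree.tostring's default of returning bytes):

sample_xml = b'<logs><entry level="error">disk full</entry><entry level="info">ok</entry></logs>'

# each match is returned as a serialized subtree
errors = parseXMLxpathSearch(sample_xml, '//entry[@level="error"]')
for chunk in errors:
    print(chunk)   # e.g. b'<entry level="error">disk full</entry>'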

Example 2: parseXMLxpathSearchSingle

# Required module: from lxml import html [as alias]
# Or: from lxml.html import xpath [as alias]
# (this example also uses: from lxml import etree)
def parseXMLxpathSearchSingle(xml_source, xpathString):
#---------------------------------------------------------------------------------

    return_values = []

    try:
        root = etree.XML(xml_source)

        data_points = root.xpath(xpathString)
    
        for data in data_points:
            return_values.append(data)
            data.clear()   # note: clear() empties the element that was just appended

    except Exception:
        pass

    return return_values



#---------------------------------------------------------------------------------
# parse HTML and return value asked 
Developer: kenb123, Project: Basic-Expression-Lexicon-Variation-Algorithms-BELVA, Lines: 25, Source: belvaParseXML.py

Example 3: parseXMLxpathSearchAttribute

# Required module: from lxml import html [as alias]
# Or: from lxml.html import xpath [as alias]
# (this example also uses: from lxml import etree)
def parseXMLxpathSearchAttribute(xml_source, xpathString):
#---------------------------------------------------------------------------------


    return_values = []

    try:
        root = etree.XML(xml_source)

        data_points = root.xpath(xpathString)
    
        for data in data_points:
            return_values.append(data)
            data.clear()
    except Exception:
        pass

    return return_values




#---------------------------------------------------------------------------------
# parse HTML and return value asked 
Developer: kenb123, Project: Basic-Expression-Lexicon-Variation-Algorithms-BELVA, Lines: 26, Source: belvaParseXML.py

Example 4: parseHTMLxpathSearch

# Required module: from lxml import html [as alias]
# Or: from lxml.html import xpath [as alias]
def parseHTMLxpathSearch(http_source, xpathString):
#---------------------------------------------------------------------------------

    return_values = []


    # strip NUL bytes that would make the parser choke
    http_source = str(http_source).replace('\x00', '')
    try:
        html = lxml.html.fromstring(http_source)

        for data in html.xpath(xpathString):
            return_values.append(etree.tostring(data))
            data.clear()

    except Exception:
        pass

    return return_values



#---------------------------------------------------------------------------------
# parse HTML and return value asked 
Developer: kenb123, Project: Basic-Expression-Lexicon-Variation-Algorithms-BELVA, Lines: 25, Source: belvaParseXML.py
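A hypothetical call to parseHTMLxpathSearch could look like this; the HTML fragment is made up, and each result is the matched node serialized with etree.tostring:

sample_html = '<html><body><ul><li class="item">one</li><li class="item">two</li></ul></body></html>'

items = parseHTMLxpathSearch(sample_html, '//li[@class="item"]')
# items holds the serialized <li> nodes, e.g. [b'<li class="item">one</li>', ...]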

Example 5: __query_new_stocks

# Required module: from lxml import html [as alias]
# Or: from lxml.html import xpath [as alias]
def __query_new_stocks(self):
        DATA_URL = 'http://vip.stock.finance.sina.com.cn/corp/view/vRPD_NewStockIssue.php?page=1&cngem=0&orderBy=NetDate&orderType=desc'
        html = lxml.html.parse(DATA_URL)
        res = html.xpath('//table[@id=\"NewStockTable\"]/tr')
        if six.PY2:
            sarr = [etree.tostring(node) for node in res]
        else:
            sarr = [etree.tostring(node).decode('utf-8') for node in res]
        sarr = ''.join(sarr)
        sarr = sarr.replace('<font color="red">*</font>', '')
        sarr = '<table>%s</table>' % sarr
        df = pd.read_html(StringIO(sarr), skiprows=[0, 1])[0]
        df = df.select(lambda x: x in [0, 1, 2, 3, 7], axis=1)
        df.columns = ['code', 'xcode', 'name', 'ipo_date', 'price']
        df['code'] = df['code'].map(lambda x: str(x).zfill(6))
        df['xcode'] = df['xcode'].map(lambda x: str(x).zfill(6))
        return df 
Developer: QUANTAXIS, Project: QUANTAXIS, Lines: 19, Source: shipaneclient.py
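Several of the examples below reuse the same idiom: select table rows with xpath(), serialize them back to markup, and hand a reassembled <table> to pandas. A stripped-down, self-contained sketch of that idiom (using an invented table rather than a live URL) looks roughly like this:

from io import StringIO

import lxml.html
import pandas as pd
from lxml import etree

raw = ('<html><body><table id="t">'
       '<tr><th>code</th><th>name</th></tr>'
       '<tr><td>600000</td><td>demo</td></tr>'
       '</table></body></html>')
doc = lxml.html.fromstring(raw)
rows = doc.xpath('//table[@id="t"]//tr')
sarr = ''.join(etree.tostring(node).decode('utf-8') for node in rows)
df = pd.read_html(StringIO('<table>%s</table>' % sarr))[0]
print(df)   # one data row with columns 'code' and 'name'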

Example 6: _profit_divis

# Required module: from lxml import html [as alias]
# Or: from lxml.html import xpath [as alias]
def _profit_divis(pageNo, dataArr, nextPage):
        ct._write_console()
        html = lxml.html.parse('%sdata.cfi.cn/%s'%(ct.P_TYPE['http'], nextPage))
        res = html.xpath("//table[@class=\"table_data\"]/tr")
        if ct.PY3:
            sarr = [etree.tostring(node).decode('utf-8') for node in res]
        else:
            sarr = [etree.tostring(node) for node in res]
        sarr = ''.join(sarr)
        sarr = sarr.replace('--', '0')
        sarr = '<table>%s</table>'%sarr
        df = pd.read_html(sarr, skiprows=[0])[0]
        dataArr = dataArr.append(df, ignore_index=True)
        nextPage = html.xpath('//div[@id=\"content\"]/div[2]/a[last()]/@href')[0]
        np = nextPage.split('&')[2].split('=')[1]
        if pageNo < int(np):
            return _profit_divis(int(np), dataArr, nextPage)
        else:
            return dataArr 
Developer: waditu, Project: tushare, Lines: 21, Source: reference.py

Example 7: notice_content

# Required module: from lxml import html [as alias]
# Or: from lxml.html import xpath [as alias]
def notice_content(url):
    '''
        Fetch the content of an information bulletin (信息地雷).
    Parameter
    --------
        url: link to the content
    
    Return
    --------
        string: the text content
    '''
    try:
        html = lxml.html.parse(url)
        res = html.xpath('//div[@id=\"content\"]/pre/text()')[0]
        return res.strip()
    except Exception as er:
        print(str(er)) 
Developer: waditu, Project: tushare, Lines: 19, Source: newsevent.py

Example 8: _guba_content

# Required module: from lxml import html [as alias]
# Or: from lxml.html import xpath [as alias]
def _guba_content(url):
    try:
        html = lxml.html.parse(url)
        res = html.xpath('//div[@class=\"ilt_p\"]/p')
        if ct.PY3:
            sarr = [etree.tostring(node).decode('utf-8') for node in res]
        else:
            sarr = [etree.tostring(node) for node in res]
        sarr = ''.join(sarr).replace('&#12288;', '')#.replace('\n\n', '\n').
        html_content = lxml.html.fromstring(sarr)
        content = html_content.text_content()
        ptime = html.xpath('//div[@class=\"fl_left iltp_time\"]/span/text()')[0]
        rcounts = html.xpath('//div[@class=\"fl_right iltp_span\"]/span[2]/text()')[0]
        reg = re.compile(r'\((.*?)\)') 
        rcounts = reg.findall(rcounts)[0]
        return [content, ptime, rcounts]
    except Exception:
        return ['', '', '0'] 
Developer: waditu, Project: tushare, Lines: 20, Source: newsevent.py

Example 9: _today_ticks

# Required module: from lxml import html [as alias]
# Or: from lxml.html import xpath [as alias]
def _today_ticks(symbol, tdate, pageNo, retry_count, pause):
    ct._write_console()
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            html = lxml.html.parse(ct.TODAY_TICKS_URL % (ct.P_TYPE['http'],
                                                         ct.DOMAINS['vsf'], ct.PAGES['t_ticks'],
                                                         symbol, tdate, pageNo
                                ))  
            res = html.xpath('//table[@id=\"datatbl\"]/tbody/tr')
            if ct.PY3:
                sarr = [etree.tostring(node).decode('utf-8') for node in res]
            else:
                sarr = [etree.tostring(node) for node in res]
            sarr = ''.join(sarr)
            sarr = '<table>%s</table>'%sarr
            sarr = sarr.replace('--', '0')
            df = pd.read_html(StringIO(sarr), parse_dates=False)[0]
            df.columns = ct.TODAY_TICK_COLUMNS
            df['pchange'] = df['pchange'].map(lambda x : x.replace('%', ''))
        except Exception as e:
            print(e)
        else:
            return df
    raise IOError(ct.NETWORK_URL_ERROR_MSG) 
Developer: waditu, Project: tushare, Lines: 27, Source: trading.py

Example 10: get_news_content

# Required module: from lxml import html [as alias]
# Or: from lxml.html import xpath [as alias]
def get_news_content(url):
    """
    获取新闻内容
    :param url: str, 新闻链接
    :return: str, 新闻内容
    """
    content = ''
    try:
        text = disk_cache_downloader(url)
        html = lxml.etree.HTML(text)
        res = html.xpath('//*[@id="artibody" or @id="article"]//p')
        p_str_list = [lxml.etree.tostring(node).decode('utf-8') for node in res]
        p_str = ''.join(p_str_list)
        html_content = lxml.html.fromstring(p_str)
        content = html_content.text_content()
        # clean up stray characters and normalize whitespace
        content = re.sub(r'\u3000', '', content)
        content = re.sub(r'[ \xa0?]+', ' ', content)
        content = re.sub(r'\s*\n\s*', '\n', content)
        content = re.sub(r'\s*(\s)', r'\1', content)
        content = content.strip()
    except Exception as e:
        print('get_news_content(%s) error:' % url, e)
    return content 
Developer: Jacen789, Project: rolling-news, Lines: 26, Source: sinanews.py
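The regular-expression clean-up near the end of get_news_content can be tested in isolation; the sample text below is invented and only meant to show what each substitution does:

import re

content = '\u3000Heading\n   \n  body text \xa0\xa0 continues  \n\n end '
content = re.sub(r'\u3000', '', content)        # drop ideographic spaces
content = re.sub(r'[ \xa0?]+', ' ', content)    # collapse runs of spaces, non-breaking spaces and '?'
content = re.sub(r'\s*\n\s*', '\n', content)    # trim whitespace around line breaks
content = re.sub(r'\s*(\s)', r'\1', content)    # squeeze remaining whitespace runs to one character
print(repr(content.strip()))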

Example 11: latest_content

# Required module: from lxml import html [as alias]
# Or: from lxml.html import xpath [as alias]
def latest_content(url):
    '''
        Fetch the content of a real-time financial news article.
    Parameter
    --------
        url: link to the news article
    
    Return
    --------
        string: the text content of the article
    '''
    try:
        html = lxml.html.parse(url)
        res = html.xpath('//div[@id=\"artibody\"]/p')
        if ct.PY3:
            sarr = [etree.tostring(node).decode('utf-8') for node in res]
        else:
            sarr = [etree.tostring(node) for node in res]
        sarr = ''.join(sarr).replace('&#12288;', '')#.replace('\n\n', '\n').
        html_content = lxml.html.fromstring(sarr)
        content = html_content.text_content()
        return content
    except Exception as er:
        print(str(er)) 
Developer: andyzsf, Project: TuShare, Lines: 26, Source: newsevent.py

Example 12: parseCDATA

# Required module: from lxml import html [as alias]
# Or: from lxml.html import xpath [as alias]
# (this example also uses: from lxml import etree)
def parseCDATA(xml_source, xpathString):
#---------------------------------------------------------------------------------

    return_values = []

    print(xml_source)
    root = etree.fromstring(xml_source)
    for log in root.xpath(xpathString):
        return_values.append(str(log.text))

    return return_values 
Developer: kenb123, Project: Basic-Expression-Lexicon-Variation-Algorithms-BELVA, Lines: 13, Source: belvaParseXML.py
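For reference, a self-contained illustration of the kind of extraction parseCDATA performs (the XML below is invented): lxml resolves CDATA sections transparently, so .text of a matched element already contains the raw character data.

from lxml import etree

xml_source = b'<logs><log><![CDATA[<b>raw & unescaped markup</b>]]></log></logs>'
root = etree.fromstring(xml_source)
for log in root.xpath('//log'):
    print(log.text)   # -> <b>raw & unescaped markup</b>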

Example 13: _dist_cotent

# Required module: from lxml import html [as alias]
# Or: from lxml.html import xpath [as alias]
def _dist_cotent(year, pageNo, retry_count, pause):
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            if pageNo > 0:
                ct._write_console()
            html = lxml.html.parse(rv.DP_163_URL%(ct.P_TYPE['http'], ct.DOMAINS['163'],
                     ct.PAGES['163dp'], year, pageNo))  
            res = html.xpath('//div[@class=\"fn_rp_list\"]/table')
            if ct.PY3:
                sarr = [etree.tostring(node).decode('utf-8') for node in res]
            else:
                sarr = [etree.tostring(node) for node in res]
            sarr = ''.join(sarr)
            df = pd.read_html(sarr, skiprows=[0])[0]
            df = df.drop(df.columns[0], axis=1)
            df.columns = rv.DP_163_COLS
            df['divi'] = df['plan'].map(_fun_divi)
            df['shares'] = df['plan'].map(_fun_into)
            df = df.drop('plan', axis=1)
            df['code'] = df['code'].astype(object)
            df['code'] = df['code'].map(lambda x : str(x).zfill(6))
            pages = []
            if pageNo == 0:
                page = html.xpath('//div[@class=\"mod_pages\"]/a')
                if len(page)>1:
                    asr = page[len(page)-2]
                    pages = asr.xpath('text()')
        except Exception as e:
            print(e)
        else:
            if pageNo == 0:
                return df, pages[0] if len(pages)>0 else 0
            else:
                return df
    raise IOError(ct.NETWORK_URL_ERROR_MSG) 
Developer: waditu, Project: tushare, Lines: 38, Source: reference.py

Example 14: _get_forecast_data

# Required module: from lxml import html [as alias]
# Or: from lxml.html import xpath [as alias]
def _get_forecast_data(year, quarter, pageNo, dataArr):
    ct._write_console()
    try:
        gparser = etree.HTMLParser(encoding='GBK')
        html = lxml.html.parse(ct.FORECAST_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'], 
                                                ct.PAGES['fd'], year, quarter, pageNo,
                                                ct.PAGE_NUM[1]),
                               parser=gparser)
        res = html.xpath("//table[@class=\"list_table\"]/tr")
        if ct.PY3:
            sarr = [etree.tostring(node).decode('utf-8') for node in res]
        else:
            sarr = [etree.tostring(node) for node in res]
        sarr = ''.join(sarr)
        sarr = sarr.replace('--', '0')
        sarr = '<table>%s</table>'%sarr
        df = pd.read_html(sarr)[0]
        df = df.drop([4, 5, 8], axis=1)
        df.columns = ct.FORECAST_COLS
        dataArr = dataArr.append(df, ignore_index=True)
        nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
        if len(nextPage)>0:
            pageNo = re.findall(r'\d+',nextPage[0])[0]
            return _get_forecast_data(year, quarter, pageNo, dataArr)
        else:
            return dataArr
    except Exception as e:
        print(e)
Developer: waditu, Project: tushare, Lines: 30, Source: reference.py
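The recursion in _get_forecast_data hinges on pulling the next page number out of the pager's onclick attribute. A rough, self-contained illustration of that step, using invented markup, is shown below:

import re

import lxml.html

pager = lxml.html.fromstring('<div class="pages"><a onclick="go_page(2)">下一页</a></div>')
nextPage = pager.xpath('//div[@class="pages"]/a[last()]/@onclick')
if len(nextPage) > 0:
    pageNo = re.findall(r'\d+', nextPage[0])[0]
    print(pageNo)   # -> '2'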

Example 15: _newstocks

# Required module: from lxml import html [as alias]
# Or: from lxml.html import xpath [as alias]
def _newstocks(data, pageNo, retry_count, pause):
    for _ in range(retry_count):
        time.sleep(pause)
        ct._write_console()
        try:
            html = lxml.html.parse(rv.NEW_STOCKS_URL%(ct.P_TYPE['http'],ct.DOMAINS['vsf'],
                         ct.PAGES['newstock'], pageNo))
            res = html.xpath('//table[@id=\"NewStockTable\"]/tr')
            if len(res) == 0:
                return data
            if ct.PY3:
                sarr = [etree.tostring(node).decode('utf-8') for node in res]
            else:
                sarr = [etree.tostring(node) for node in res]
            sarr = ''.join(sarr)
            sarr = sarr.replace('<font color="red">*</font>', '')
            sarr = '<table>%s</table>'%sarr
            df = pd.read_html(StringIO(sarr), skiprows=[0, 1])[0]
            df = df.drop([df.columns[idx] for idx in [12, 13, 14]], axis=1)
            df.columns = rv.NEW_STOCKS_COLS
            df['code'] = df['code'].map(lambda x : str(x).zfill(6))
            df['xcode'] = df['xcode'].map(lambda x : str(x).zfill(6))
            res = html.xpath('//table[@class=\"table2\"]/tr[1]/td[1]/a/text()')
            tag = '下一页' if ct.PY3 else unicode('下一页', 'utf-8')
            hasNext = True if tag in res else False 
            data = data.append(df, ignore_index=True)
            pageNo += 1
            if hasNext:
                data = _newstocks(data, pageNo, retry_count, pause)
        except Exception as ex:
            print(ex)
        else:
            return data 
Developer: waditu, Project: tushare, Lines: 35, Source: reference.py


Note: The lxml.html.xpath method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.