Python request.read Function Code Examples

This article collects typical usage examples of read from Python's urllib.request module. Strictly speaking, read() is not a module-level function: it is a method of the response object (an http.client.HTTPResponse) returned by urllib.request.urlopen(). If you are wondering what read() does, how to call it, or want to see it used in real code, the curated examples below should help.


Fifteen code examples of the read method are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
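
Before the examples, here is a minimal sketch of the basic pattern (the URL is just a placeholder): urlopen() returns an http.client.HTTPResponse, and read() is called on that response to obtain the raw bytes of the body.

import urllib.request

# urlopen() returns an http.client.HTTPResponse; read() returns the body as bytes.
with urllib.request.urlopen('https://example.com/') as response:
    body = response.read()         # bytes
    text = body.decode('utf-8')    # decode to str before e.g. json.loads()
print(text[:80])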

Example 1: createItemList

import json
import random
import urllib.request


def createItemList():
	# Fetch the Walmart taxonomy and collect the category ids.
	request = urllib.request.urlopen('http://api.walmartlabs.com/v1/taxonomy?format=json&apiKey=tkbnu8astb9xxtn2ux9vw73b')
	response = request.read()
	jdict = json.loads(response.decode())
	categories = []
	items = {}

	for i in jdict['categories']:
		categories.append(i['id'])

	# Pick three distinct categories at random and fetch a page of items from each.
	# (The duplicated "&&" in the original query strings is reduced to a single "&";
	# addToItemList() is a helper defined elsewhere in screenScrape.py.)
	nums = random.sample(range(0, len(categories)), 3)

	for n in nums:
		reqStr = 'http://api.walmartlabs.com/v1/paginated/items?format=json&category=' + str(categories[n]) + '&apiKey=tkbnu8astb9xxtn2ux9vw73b'
		request = urllib.request.urlopen(reqStr)
		response = request.read()
		jdict = json.loads(response.decode())
		addToItemList(jdict, items)

	return items
Developer ID: JonathanNix, Project: scripting, Lines of code: 31, Source file: screenScrape.py

Example 2: _access_project

def _access_project():
    """
    Call the homepage of the project for the given branch if a URL is set.
    This is a cheap way to fill the lru cache.
    """
    if hasattr(env, 'url'):
        # Wait for the uwsgi restart triggered by the touch to finish.
        time.sleep(10)
        for lang in settings.LANGUAGES:
            url = urllib.request.urlopen(env.url.format(lang[0]))
            with contextlib.closing(url) as request:
                request.read()
                print('Read response from: {}'.format(request.url))
Developer ID: CDE-UNIBE, Project: qcat, Lines of code: 12, Source file: __init__.py

Example 3: get_nhl_live_games

def get_nhl_live_games(self, e, webCall=False):
    if e.input:
        today = e.input
    else:
        today = datetime.date.today().strftime("%Y-%m-%d")
    url = "http://live.nhle.com/GameData/GCScoreboard/{}.jsonp".format(today)
    request = urllib.request.urlopen(url)
    data = request.read().decode()[15:-2]  # slice off the JSONP callback wrapper; see the note after this example
    data = json.loads(data)

    games = []
    for game in data['games']:
        if not game['bsc']:
            start = game['bs'].replace(':00 ', ' ')
            gametxt = "{} - {} ({} ET)".format(game['atcommon'].title(),
                                               game['htcommon'].title(),
                                               start)
        else:
            gametxt = "{} {} - {} {} ({})".format(game['atcommon'].title(),
                                                  game['ats'],
                                                  game['hts'],
                                                  game['htcommon'].title(),
                                                  game['bs'])
        games.append(gametxt)

    if webCall:
        return " | ".join(games)

    e.output = " | ".join(games)
    return e
Developer ID: KpaBap, Project: genmaybot, Lines of code: 30, Source file: nhl.py
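
A note on the slicing above: this NHL endpoint returns JSONP, i.e. the JSON wrapped in a callback such as loadScoreboard(...), and [15:-2] cuts off exactly that prefix and the trailing ");". A sketch of a more tolerant way to unwrap such a payload (strip_jsonp is a hypothetical helper, not part of the original project):

import json
import re

def strip_jsonp(payload):
    # Drop a "callback( ... )" wrapper, tolerating a trailing ";" and whitespace.
    match = re.match(r'^[^(]*\((.*)\)\s*;?\s*$', payload, re.S)
    return json.loads(match.group(1) if match else payload)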

Example 4: listen

    def listen(self):
        logging.debug(u'OnigiriAlert.listen() started.')

        url = TWITCASTING_API_LIVE_STATUS + '?type=json&user=' + self.user
        last_is_live = False

        while True:
            try:
                request = urllib.request.urlopen(url)
                encoding = request.headers.get_content_charset()
                response = request.read().decode(encoding)
                # logging.debug(response)

                parsed = json.loads(response)
                logging.debug(parsed)
                is_live = parsed["islive"]

                if (last_is_live is False and is_live is True) or DEBUG_FORCE_PUSH:  # fire on the False -> True transition
                    self.notify(parsed)
                    if DEBUG_FORCE_PUSH:
                        os.sys.exit()

                last_is_live = is_live
                # raise Exception('test exception')
            except Exception as error:
                logging.error("caught exception in polling loop, error: [{}]".format(error))
                # os.sys.exit()

            time.sleep(POLLING_INTERVAL)

        logging.debug(u'OnigiriAlert.listen() ended.')  # unreachable: the polling loop above never returns
Developer ID: honishi, Project: onigiri-alert-server, Lines of code: 31, Source file: onigiri.py

Example 5: main

def main():
    """Main function"""

    # PARSE OPTIONS ###########################################################

    parser = argparse.ArgumentParser(description='A BeautifulSoup snippet.')
    parser.add_argument("url", nargs=1, metavar="URL",
                        help="The URL of the webpage to parse.")
    args = parser.parse_args()

    url = args.url[0]
    #print("url:", url)

    # GET HTML ################################################################

    request = urllib.request.urlopen(url)
    #print("STATUS:", request.status)

    html = request.read()
    #print(html)

    # PARSE HTML ##############################################################

    soup = BeautifulSoup(html, 'html.parser')  # name a parser explicitly so bs4 does not guess

    #print(soup.prettify())

    for img in soup.find_all('img'):
        print(img.get('src'))
Developer ID: jeremiedecock, Project: snippets, Lines of code: 29, Source file: get_images.py

Example 6: dnsHistory

def dnsHistory(domain):
    rows = ''
    print("\n-- Checking dns history --")
    url = 'http://toolbar.netcraft.com/site_report?url=' + domain
    try:
        request = urllib.request.urlopen(url)
        html = request.read().decode('utf-8', errors='replace')  # read() returns bytes; decode before parsing
    except Exception:
        html = ''
    soup = BeautifulSoup(html, 'html.parser')
    tables = soup.findAll(attrs={'class': 'TBtable'})
    try:
        table = tables[1]
    except IndexError:
        table = ''  # prevents errors if no history is returned
        rows = ''
    if table:
        rows = soup.table.findAll('tr')  # Need to edit out again
    x = -1
    try:
        for tr in rows:
            columns = tr.findAll('td')
            for td in columns:
                text = ''.join(td.find(text=True))
                if x % 5 == 0:  # Only ip addresses are checked
                    if dns.query(text):  # Finds last ip thats not CloudFlare
                        print(output("The last known ip address is: %s" % text))
                        if text not in iplist:
                            iplist.append(text)
                        raise End  # Breaks from multiple loops
                x += 1
    except End:
        pass
    print("\n#" + "-" * 77 + "#")
Developer ID: alma4rebi, Project: V3n0M-Scanner, Lines of code: 34, Source file: cloud.py

Example 7: family_download_json

    def family_download_json(self, family):
        """
        Download JSON information from the internet. It does not
        save any data anywhere.
        """
        request = urllib.request.urlopen(self.family_download_url(family))
        return json.loads(request.read().decode('utf-8'))
Developer ID: vfrico, Project: fontsquirrel_dl, Lines of code: 7, Source file: fontsquirrel.py

Example 8: download

    def download(self, name, md5):
        url = url_prefix + name
        try:
            request = urlopen(url)
            txt = request.read()  # raw bytes; hashed and written to disk as-is
        except urllib.error.HTTPError as e:
            print('Unable to get %s - HTTPError = %s' % (name, e.reason))
            return False
        except urllib.error.URLError as e:
            print('Unable to get %s - URLError = %s' % (name, e.reason))
            return False
        except http.client.HTTPException:
            # Python 3 moved httplib to http.client; "httplib.error.HTTPException" does not exist
            print('Unable to get %s - HTTPException' % name)
            return False
        except Exception:
            import traceback
            print('Unable to get %s - Exception = %s' % (name, traceback.format_exc()))
            return False

        web_md5 = self.get_md5(txt)
        
        if web_md5 != md5:
            print("Checksum error in %s. Download aborted" % name)
            return False
        
        new_name = os.path.join(base_dir, name + "." + self.web_version.replace(".", "_"))
        
        with open(new_name, "wb") as f:
            f.write(txt)

        return True
Developer ID: Astalaseven, Project: Cnchi, Lines of code: 33, Source file: updater.py
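
The self.get_md5 helper is not shown in this snippet; a plausible implementation (an assumption, since the rest of the class is not quoted here) hashes the downloaded bytes with hashlib:

import hashlib

def get_md5(data):
    # data is the bytes object returned by request.read()
    return hashlib.md5(data).hexdigest()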

Example 9: pywget_inside_crawler

def pywget_inside_crawler(url, depth, start_dir, start_file, root_dir_name):
    """
        Crawl the given url, find all <a href> and <img src> tags,
        extract the information inside those tags and apply pywget_recursive() to each of them.

        Arguments:
        url                -- the url that is to be crawled
        depth              -- total number of recursions
        start_dir          -- the directory of this .py file
        start_file         -- the first file that was downloaded; kept to avoid cycles
        root_dir_name      -- the root directory for downloaded files
    """
    depth -= 1

    content = ''
    try:
        request = urllib.request.urlopen(url)
        content = request.read().decode("utf-8")
    except:
        pass

    # all the information that's inside <a href> and <img src> tags
    match = re.findall(r'<a href="(.*?)"', content) + \
            re.findall(r'<a href = "(.*?)"', content) + \
            re.findall(r'<img src="(.*?)"', content) + \
            re.findall(r'<img src = "(.*?)"', content)

    prefix = url[0 : url.rfind('/')]                                           # a prefix of the link. useful to check if a link is under the same domain

    all_item_list = add_item_to_list(match, prefix)                            # add information to a list

    for item in all_item_list:
        pywget_recursive(item, depth, start_dir, start_file, root_dir_name)    # recursively download the information
Developer ID: xuanshenbo, Project: Web-crawler, Lines of code: 33, Source file: challenge.py

Example 10: get_data_source_one

    def get_data_source_one(self):
        """Retrieves data from the first Yahoo Finance source."""
        data = 'http://finance.yahoo.com/webservice/v1/symbols/' + self.stock + '/quote?format=json&view=detail'
        request = urllib.request.urlopen(data)
        response = request.read()
        charset = request.info().get_content_charset('utf-8')  # fall back to utf-8 if no charset header is sent
        self.data_s1 = json.loads(response.decode(charset))
Developer ID: shanedonovan, Project: yahoo-finance, Lines of code: 7, Source file: yahoo_finance.py

Example 11: main

def main():
    """Main function"""

    # PARSE OPTIONS ###########################################################

    parser = argparse.ArgumentParser(description='A BeautifulSoup snippet.')
    parser.add_argument("url", nargs=1, metavar="URL",
                        help="The URL of the webpage to parse.")
    args = parser.parse_args()

    url = args.url[0]
    print("url:", url)

    # GET HTML ################################################################

    request = urllib.request.urlopen(url)
    print("STATUS:", request.status)

    html = request.read()
    #print(html)

    # PARSE HTML ##############################################################

    soup = BeautifulSoup(html, 'html.parser')  # name a parser explicitly so bs4 does not guess

    print(soup.prettify())

    print("Element name:", soup.title.name)
    print("Element value:", soup.title.string)

    print()

    for anchor in soup.find_all('a'):
        print(anchor.get('href'))
Developer ID: jeremiedecock, Project: snippets, Lines of code: 34, Source file: test.py

Example 12: __init__

    def __init__(self, force_update):
        self.web_version = ""
        self.web_files = []

        response = ""
        try:
            update_info_url = _url_prefix + "update.info"
            request = urlopen(update_info_url)
            response = request.read().decode("utf-8")

        except urllib.error.HTTPError as e:
            logging.exception("Unable to get latest version info - HTTPError = %s" % e.reason)
        except urllib.error.URLError as e:
            logging.exception("Unable to get latest version info - URLError = %s" % e.reason)
        except http.client.HTTPException:
            # Python 3 name for the old httplib.HTTPException
            logging.exception("Unable to get latest version info - HTTPException")
        except Exception as e:
            import traceback

            logging.exception("Unable to get latest version info - Exception = %s" % traceback.format_exc())

        if len(response) > 0:
            updateInfo = json.loads(response)

            self.web_version = updateInfo["version"]
            self.web_files = updateInfo["files"]

            logging.info("Cnchi Internet version: %s" % self.web_version)

            self.force = force_update
Developer ID: pombredanne, Project: lution, Lines of code: 30, Source file: updater.py

Example 13: download

    def download(self, path):
        """Download the file, or keep the existing local copy."""
        target_path = self._generate_path(path)
        target_file = os.path.join(target_path, self.name)
        downf = not os.path.exists(target_file)
        if not downf:
            # At this point the file already exists locally.
            self.path = target_file
            self.directory = target_path
        downf = downf or (self.size != os.path.getsize(target_file))
        if downf:
            try:
                request = urllib.request.urlopen(self.url)
                f = open(target_file, 'wb')
                while True:
                    data = request.read(100*1024)  # read the body in 100 KiB chunks
                    if data:
                        print("downloading %s (%d/%d)\r" %
                              (self.name, os.path.getsize(target_file), self.size))
                        f.write(data)
                    else:
                        break
                print("%s completed" % (self.name))
                f.close()
                self.path = target_file
                self.directory = target_path
            except urllib.error.HTTPError:
                path = None
Developer ID: ilario-pierbattista, Project: hasp-tracker, Lines of code: 28, Source file: dataset_downloader.py
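
The loop above reads the body in 100 KiB chunks so large files never sit in memory in one piece. For comparison, a compact sketch of the same streaming copy using shutil.copyfileobj (stream_download is a hypothetical helper, not part of the original project):

import shutil
import urllib.request

def stream_download(url, target_file, chunk=100 * 1024):
    # Stream the response body to disk without holding it all in memory.
    with urllib.request.urlopen(url) as request, open(target_file, 'wb') as f:
        shutil.copyfileobj(request, f, length=chunk)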

Example 14: Challenge13

def Challenge13():
  import xmlrpc.client
  import urllib.request
  
  startAddr = 'http://www.pythonchallenge.com/pc/return/evil4.jpg'
  resultAddr = 'http://www.pythonchallenge.com/pc/return/' 

  XMLRPCserver = xmlrpc.client.Server(
    'http://www.pythonchallenge.com/pc/phonebook.php'
  )

  auth_handler = urllib.request.HTTPBasicAuthHandler()
  auth_handler.add_password(realm='inflate',
                            uri=startAddr,
                            user='huge',
                            passwd='file')
  opener = urllib.request.build_opener(auth_handler)
  urllib.request.install_opener(opener)
  request = urllib.request.urlopen(startAddr)

  rData = request.read().decode()
  
  evilName = rData.split()[0]
  
  resultAddr += XMLRPCserver.phone(evilName).split('-')[1].lower() + '.html'
  print(resultAddr)
Developer ID: jakamitsu, Project: pythonchallenge.com-solutions, Lines of code: 26, Source file: solver.py

Example 15: getJSON

	def getJSON(self, url):
		try:
			request = urllib.request.urlopen(url)
			data = json.loads(request.read().decode('UTF-8'))
			return data
		except urllib.error.URLError as e:
			logging.warning("Error: TWITCH API connection: %s" % e.reason)
			return None  # make the failure value explicit for callers
Developer ID: choigit, Project: kritzbot, Lines of code: 7, Source file: api.py


注:本文中的urllib.request.read函数示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。