当前位置: 首页>>代码示例>>Python>>正文


Python urllib.urlcleanup方法代码示例

本文整理汇总了Python中urllib.urlcleanup方法的典型用法代码示例。如果您正苦于以下问题:Python urllib.urlcleanup方法的具体用法?Python urllib.urlcleanup怎么用?Python urllib.urlcleanup使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在urllib的用法示例。


在下文中一共展示了urllib.urlcleanup方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: refresh_overview

# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlcleanup [as 别名]
def refresh_overview(self):
    """Refresh the overview of the currently displayed game, with retries.

    Clears urllib's retrieval cache first so stale data is not served, then
    makes up to 5 attempts to fetch a fresh overview from mlbgame.  On
    success the layout state is updated and the refresh/network flags are
    cleared; after exhausting all retries ``self.network_issues`` stays True
    and control falls through so the caller can move on to the next game.
    """
    urllib.urlcleanup()  # drop any cached retrievals before re-fetching
    attempts_remaining = 5
    while attempts_remaining > 0:
      try:
        self.overview = mlbgame.overview(self.current_game().game_id)
        self.__update_layout_state()
        self.needs_refresh = False
        self.print_overview_debug()
        self.network_issues = False
        break
      except URLError as e:  # 'as' spelling is valid on Python 2.6+ and 3.x
        self.network_issues = True
        debug.error("Networking Error while refreshing the current overview. {} retries remaining.".format(attempts_remaining))
        debug.error("URLError: {}".format(e.reason))
        attempts_remaining -= 1
        time.sleep(NETWORK_RETRY_SLEEP_TIME)
      except ValueError:
        # mlbgame raises ValueError when the feed content is malformed/missing.
        self.network_issues = True
        debug.error("Value Error while refreshing current overview. {} retries remaining.".format(attempts_remaining))
        debug.error("ValueError: Failed to refresh overview for {}".format(self.current_game().game_id))
        attempts_remaining -= 1
        time.sleep(NETWORK_RETRY_SLEEP_TIME)

    # If we run out of retries, just move on to the next game
开发者ID:MLB-LED-Scoreboard,项目名称:mlb-led-scoreboard,代码行数:27,代码来源:data.py

示例2: download_file

# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlcleanup [as 别名]
def download_file(url, path, attempt=0):
	"""Download *url* to *path*, retrying up to three times on failure.

	@url      the remote file to fetch.
	@path     the local destination path.
	@attempt  internal retry counter; callers normally leave it at 0.

	On any exception the download is retried after a 5 second pause, at most
	three times; after the third failure the file is skipped with a message.
	urlcleanup() removes urlretrieve's temporary files after a successful
	fetch.
	"""
	try:
		urllib.urlretrieve(url, path)
		urllib.urlcleanup()
	except Exception as e:
		# 'attempt < 3' (was 'not attempt == 3') also terminates if a
		# caller ever passes attempt > 3, instead of retrying forever.
		if attempt < 3:
			attempt += 1
			print("[E] ({:d}) Download failed: {:s}.".format(attempt, str(e)))
			print("[W] Trying again in 5 seconds.")
			time.sleep(5)
			download_file(url, path, attempt)
		else:
			print("[E] Retry failed three times, skipping file.")
			print('-' * 70)
开发者ID:dvingerh,项目名称:PyInstaStories,代码行数:16,代码来源:pyinstastories.py

示例3: dash_R_cleanup

# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlcleanup [as 别名]
def dash_R_cleanup(fs, ps, pic):
    """Reset interpreter-global state between regrtest -R leak-hunt runs.

    @fs  saved warnings.filters list to restore.
    @ps  saved copy_reg.dispatch_table contents to restore.
    @pic saved sys.path_importer_cache contents to restore.

    Restores the saved snapshots and then purges every module-level cache
    the stdlib is known to keep, so objects held by those caches are not
    reported as reference leaks.  Python 2 era code (copy_reg, urllib2,
    dircache are Py2-only modules).
    """
    import gc, copy_reg
    import _strptime, linecache, dircache
    import urlparse, urllib, urllib2, mimetypes, doctest
    import struct, filecmp
    from distutils.dir_util import _path_created

    # Restore some original values.
    warnings.filters[:] = fs
    copy_reg.dispatch_table.clear()
    copy_reg.dispatch_table.update(ps)
    sys.path_importer_cache.clear()
    sys.path_importer_cache.update(pic)

    # Clear assorted module caches.
    _path_created.clear()
    re.purge()
    _strptime._regex_cache.clear()
    urlparse.clear_cache()
    urllib.urlcleanup()           # remove urlretrieve's temp files / cache
    urllib2.install_opener(None)  # reinstall the default opener
    dircache.reset()
    linecache.clearcache()
    mimetypes._default_mime_types()
    struct._cache.clear()
    filecmp._cache.clear()
    doctest.master = None

    # Collect cyclic trash.
    gc.collect()
开发者ID:ofermend,项目名称:medicare-demo,代码行数:32,代码来源:regrtest.py

示例4: download_file

# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlcleanup [as 别名]
def download_file(url, destfile):
    """
     download_file: function for download from url to save as destfile
        @url the source file to download.
        @destfile the destination save file for local.

     Downloads via urllib.urlretrieve into a temporary file (progress is
     shown through the module-level reporthook callback).  If destfile
     already exists it is first backed up as "<destfile>.<timestamp>"
     before the download is moved into place.  The finally block always
     calls urlcleanup() so urlretrieve's temp files are removed even when
     the download fails.  Python 2 code (uses print statements).
    """
    file_url = url

    try:
        print("--> Downloading file: %s" % file_url)
        # urlretrieve returns (local_temp_filename, http_headers).
        filename, msg = urllib.urlretrieve(
                #'http://code.jquery.com/jquery-2.1.1.js',
                file_url,
                reporthook = reporthook)

        print ""
        #print "File:", filename
        print "Header:"
        print msg
        if os.path.exists(filename):
            if os.path.exists(destfile):
                # Keep a timestamped backup of the previous destination file.
                now = currenttime()
                tmpfile = "%s.%s" % (destfile, now)
                shutil.move(destfile, tmpfile)
            shutil.move(filename, destfile)

        #print 'File exists before cleanup:', os.path.exists(filename)
    finally:
        urllib.urlcleanup()
        #print 'File still exists:', os.path.exists(filename)
开发者ID:soarpenguin,项目名称:python-scripts,代码行数:32,代码来源:urllibdownload.py

示例5: http

# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlcleanup [as 别名]
def http(fetch):
    """Decorator for downloading files from HTTP sites.

    The wrapped *fetch* must return ``(url_template, bbox, dt)``; the
    template is formatted with dt.year/month/day, the resulting file is
    downloaded into a freshly created temporary directory, and
    ``(outpath, local_filename, bbox, dt)`` is returned.  On any download
    failure ``local_filename`` is None (best-effort semantics).
    """
    @wraps(fetch)
    def wrapper(*args, **kwargs):
        url, bbox, dt = fetch(*args, **kwargs)
        outpath = tempfile.mkdtemp()
        filename = url.format(dt.year, dt.month, dt.day)
        try:
            lfilename = filename.split("/")[-1]
            urllib.urlcleanup()  # drop any cached copy before re-fetching
            urllib.urlretrieve(filename, "{0}/{1}".format(outpath, lfilename))
        except Exception:
            # Was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt; Exception keeps the best-effort behavior
            # while letting process-control exceptions propagate.
            lfilename = None
        return outpath, lfilename, bbox, dt
    return wrapper
开发者ID:nasa,项目名称:RHEAS,代码行数:17,代码来源:decorators.py

示例6: refresh_games

# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlcleanup [as 别名]
def refresh_games(self):
    """Refresh the master list of games for the current date, with retries.

    Rolls the tracked date forward via set_current_date(), fetches the day's
    games from mlbgame (optionally filtered down to the preferred teams),
    and records the refresh time.  When the date changes, the rotation jumps
    back to the preferred team's game.  Up to 5 attempts are made; after
    exhausting them ``self.network_issues`` stays True.
    """
    debug.log("Updating games for {}/{}/{}".format(self.month, self.day, self.year))
    urllib.urlcleanup()  # drop any cached retrievals before re-fetching
    attempts_remaining = 5
    while attempts_remaining > 0:
      try:
        current_day = self.day
        self.set_current_date()

        all_games = mlbgame.day(self.year, self.month, self.day)
        if self.config.rotation_only_preferred:
          self.games = self.__filter_list_of_games(all_games, self.config.preferred_teams)
        else:
          self.games = all_games

        # Date rolled over: restart rotation at the preferred team's game.
        if current_day != self.day:
          self.current_game_index = self.game_index_for_preferred_team()
        self.games_refresh_time = time.time()
        self.network_issues = False
        break
      except URLError as e:  # 'as' spelling is valid on Python 2.6+ and 3.x
        self.network_issues = True
        debug.error("Networking error while refreshing the master list of games. {} retries remaining.".format(attempts_remaining))
        debug.error("URLError: {}".format(e.reason))
        attempts_remaining -= 1
        time.sleep(NETWORK_RETRY_SLEEP_TIME)
      except ValueError:
        # mlbgame raises ValueError when the feed content is malformed/missing.
        self.network_issues = True
        debug.error("Value Error while refreshing master list of games. {} retries remaining.".format(attempts_remaining))
        debug.error("ValueError: Failed to refresh list of games")
        attempts_remaining -= 1
        time.sleep(NETWORK_RETRY_SLEEP_TIME)
开发者ID:MLB-LED-Scoreboard,项目名称:mlb-led-scoreboard,代码行数:34,代码来源:data.py

示例7: fetch_preferred_team_overview

# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlcleanup [as 别名]
def fetch_preferred_team_overview(self):
    """Fetch a fresh overview for the preferred team's game.

    Returns None when the preferred team has an off day.  Otherwise clears
    urllib's retrieval cache, looks up the preferred team's game in
    ``self.games`` and pulls its overview from mlbgame, logging the status.
    """
    if self.is_offday_for_preferred_team():
      return None
    urllib.urlcleanup()  # avoid serving a stale cached feed
    preferred_game = self.games[self.game_index_for_preferred_team()]
    overview = mlbgame.overview(preferred_game.game_id)
    debug.log("Preferred Team's Game Status: {}, {} {}".format(overview.status, overview.inning_state, overview.inning))
    return overview
开发者ID:MLB-LED-Scoreboard,项目名称:mlb-led-scoreboard,代码行数:9,代码来源:data.py

示例8: dash_R_cleanup

# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlcleanup [as 别名]
def dash_R_cleanup(fs, ps, pic, zdc, abcs):
    """Reset interpreter-global state between regrtest -R leak-hunt runs.

    @fs   saved warnings.filters list to restore.
    @ps   saved copy_reg.dispatch_table contents to restore.
    @pic  saved sys.path_importer_cache contents to restore.
    @zdc  saved zipimport._zip_directory_cache contents to restore.
    @abcs saved ABC registries, mapping each ABC to a registry snapshot.

    Restores the saved snapshots and then purges every module-level cache
    the stdlib is known to keep, so objects held by those caches are not
    reported as reference leaks.  Python 2 era code (copy_reg, dircache,
    urllib2 are Py2-only modules).
    """
    import gc, copy_reg
    import _strptime, linecache
    dircache = test_support.import_module('dircache', deprecated=True)
    import urlparse, urllib, urllib2, mimetypes, doctest
    import struct, filecmp
    from distutils.dir_util import _path_created

    # Clear the warnings registry, so they can be displayed again
    for mod in sys.modules.values():
        if hasattr(mod, '__warningregistry__'):
            del mod.__warningregistry__

    # Restore some original values.
    warnings.filters[:] = fs
    copy_reg.dispatch_table.clear()
    copy_reg.dispatch_table.update(ps)
    sys.path_importer_cache.clear()
    sys.path_importer_cache.update(pic)
    try:
        import zipimport
    except ImportError:
        pass # Run unmodified on platforms without zipimport support
    else:
        zipimport._zip_directory_cache.clear()
        zipimport._zip_directory_cache.update(zdc)

    # clear type cache
    sys._clear_type_cache()

    # Clear ABC registries, restoring previously saved ABC registries.
    for abc, registry in abcs.items():
        abc._abc_registry = registry.copy()
        abc._abc_cache.clear()
        abc._abc_negative_cache.clear()

    # Clear assorted module caches.
    _path_created.clear()
    re.purge()
    _strptime._regex_cache.clear()
    urlparse.clear_cache()
    urllib.urlcleanup()           # remove urlretrieve's temp files / cache
    urllib2.install_opener(None)  # reinstall the default opener
    dircache.reset()
    linecache.clearcache()
    mimetypes._default_mime_types()
    filecmp._cache.clear()
    struct._clearcache()
    doctest.master = None
    try:
        import ctypes
    except ImportError:
        # Don't worry about resetting the cache if ctypes is not supported
        pass
    else:
        ctypes._reset_cache()

    # Collect cyclic trash.
    gc.collect()
开发者ID:dxwu,项目名称:BinderFilter,代码行数:61,代码来源:regrtest.py

示例9: get_request

# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlcleanup [as 别名]
def get_request(test):
    # perform GET request

    urllib.urlcleanup() # clear cache

    try:
        user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
        req_headers = { 'User-Agent' : user_agent }
        for each in test['headers']:
            key, val = each.split(":", 1)
            key = key.lstrip()
            val = val.lstrip()
            req_headers[key] = val
        if test['requestCookie'] or test['requestCSRF']:
            # request cookie and csrf token if set in module XML
            cookie_val, csrf_val = request_value(test)
            if cookie_val:
                req_headers['cookie'] = cookie_val
            if csrf_val:
                # replace <CSRFTOKEN> with the collected token
                test['url'] = test['url'].replace("<CSRFTOKEN>", csrf_val)
                test['postParameters'] = test['postParameters'].replace("<CSRFTOKEN>", csrf_val)
                test['headers'] = [h.replace('<CSRFTOKEN>', csrf_val) for h in test['headers']]

        if opts.debug:
            # print debug output
            print textwrap.fill((" [ ] URL (GET): %s" % test['url']),
                initial_indent='', subsequent_indent=' -> ', width=80)
            print

        # assign NullHTTPErrorProcessor as default opener
        opener = urllib2.build_opener(NullHTTPErrorProcessor())
        urllib2.install_opener(opener)

        req = urllib2.Request(test['url'], headers=req_headers)
        f = urllib2.urlopen(req)
        r_body = f.read()
        r_info = f.info()
        f.close()

        # handle instances where the response body is 0 bytes in length
        if not r_body:
            print " [" + color['red'] + "!" + color['end'] + "] Zero byte response received from %s" \
                % test['name']
            r_body = "<Scythe Message: Empty response from server>"

        # returned updated test and response data
        return test, r_body, r_info, req

    except Exception:
        print textwrap.fill((" [" + color['red'] + "!" + color['end'] + "] Error contacting %s" \
            % test['url']), initial_indent='', subsequent_indent='\t', width=80)
        if opts.debug:
            for ex in traceback.format_exc().splitlines():
                print textwrap.fill((" %s" \
                    % str(ex)), initial_indent='', subsequent_indent='\t', width=80)
            print
        return test, False, False, req 
开发者ID:AnasAboureada,项目名称:Penetration-Testing-Study-Notes,代码行数:60,代码来源:scythe.py

示例10: dash_R_cleanup

# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlcleanup [as 别名]
def dash_R_cleanup(fs, ps, pic, abcs):
    """Reset interpreter-global state between regrtest -R leak-hunt runs.

    @fs   saved warnings.filters list to restore.
    @ps   saved copy_reg.dispatch_table contents to restore.
    @pic  saved sys.path_importer_cache contents to restore.
    @abcs saved ABC registries, mapping each ABC to a registry snapshot.

    Restores the saved snapshots and then purges every module-level cache
    the stdlib is known to keep, so objects held by those caches are not
    reported as reference leaks.  Python 2 era code (copy_reg, dircache,
    urllib2 are Py2-only modules).
    """
    import gc, copy_reg
    import _strptime, linecache
    dircache = test_support.import_module('dircache', deprecated=True)
    import urlparse, urllib, urllib2, mimetypes, doctest
    import struct, filecmp
    from distutils.dir_util import _path_created

    # Clear the warnings registry, so they can be displayed again
    for mod in sys.modules.values():
        if hasattr(mod, '__warningregistry__'):
            del mod.__warningregistry__

    # Restore some original values.
    warnings.filters[:] = fs
    copy_reg.dispatch_table.clear()
    copy_reg.dispatch_table.update(ps)
    sys.path_importer_cache.clear()
    sys.path_importer_cache.update(pic)

    # clear type cache
    sys._clear_type_cache()

    # Clear ABC registries, restoring previously saved ABC registries.
    for abc, registry in abcs.items():
        abc._abc_registry = registry.copy()
        abc._abc_cache.clear()
        abc._abc_negative_cache.clear()

    # Clear assorted module caches.
    _path_created.clear()
    re.purge()
    _strptime._regex_cache.clear()
    urlparse.clear_cache()
    urllib.urlcleanup()           # remove urlretrieve's temp files / cache
    urllib2.install_opener(None)  # reinstall the default opener
    dircache.reset()
    linecache.clearcache()
    mimetypes._default_mime_types()
    filecmp._cache.clear()
    struct._clearcache()
    doctest.master = None

    # Collect cyclic trash.
    gc.collect()
开发者ID:Acmesec,项目名称:CTFCrackTools-V2,代码行数:47,代码来源:regrtest.py


注:本文中的urllib.urlcleanup方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。