

Python urlgrabber.urlgrab Function Code Examples

This article collects typical usage examples of the urlgrab function from the Python urlgrabber module. If you are wondering what urlgrab does, how to call it, or what real-world uses look like, the hand-picked code examples below should help.


Fifteen code examples of the urlgrab function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better Python code examples.
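
Before the project-specific examples, here is a minimal sketch of the basic call pattern they all build on. It is illustrative only: the URL and destination path are hypothetical placeholders, and the keyword arguments shown are simply the ones that recur in the examples below (filename, progress_obj, timeout, retry, throttle).

import urlgrabber
import urlgrabber.grabber
import urlgrabber.progress

# Hypothetical source URL and destination path, used purely for illustration.
url = 'http://example.com/files/package-1.0.src.rpm'
dest = '/tmp/package-1.0.src.rpm'

# Simplest form: download into the current directory and return the local file name.
local_path = urlgrabber.urlgrab(url)

# With an explicit destination and a few of the options seen in the examples below.
try:
    local_path = urlgrabber.urlgrab(
        url,
        filename=dest,                                 # where to save the file
        progress_obj=urlgrabber.progress.TextMeter(),  # console progress meter
        timeout=120,                                   # socket timeout in seconds
        retry=3,                                       # retry failed transfers
        throttle=0)                                    # 0 disables bandwidth throttling
    print "downloaded to %s" % local_path
except urlgrabber.grabber.URLGrabError as e:
    print "download failed: %s" % e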

Example 1: srpm_from_ticket

    def srpm_from_ticket(self):
        '''Retrieve the latest srpmURL from the bugzilla URL.
        '''
        try:
            bugzillaURL = self.checklist.properties['ticketURL'].value
        except KeyError:
            # No ticket URL was given, set nothing
            return

        if not bugzillaURL:
            # No ticket URL was given, set nothing
            return

        data = urlgrabber.urlread(bugzillaURL)
        srpmList = re.compile(r'"((ht|f)tp(s)?://.*?\.src\.rpm)"', re.IGNORECASE).findall(data)
        if srpmList == []:
            # No SRPM was found.  Just decide not to set anything.
            return
        # Set the srpm to the last SRPM listed on the page
        srpmURL = srpmList[-1][0]
        if not srpmURL:
            # No srpm found.  Just decide not to set anything.
            return
        # Download the srpm to the temporary directory.
        urlgrabber.urlgrab(srpmURL, self.tmpDir)
        # Fill the SRPMfile properties with the srpm in the temp directory
        self.checklist.properties['SRPMfile'].value = (
                self.tmpDir + os.path.basename(srpmURL))
Developer: BackupTheBerlios, Project: qa-assistant-svn, Lines: 28, Source: fedoraus.py

Example 2: __download_prop_file

 def __download_prop_file(self):
     """ download prop file and validate """
     # retry downloading the prop file up to 3 times
     for _ in range(3):
         try:
             sotimeout = float(pylons.config['download_thread_sotimeout'])
             proxies = json.loads(pylons.config['urlgrabber_proxies'])
             urlgrabber.urlgrab(
                         self.__uriDict['propUri'], 
                         self.__uriDict['propPath'], 
                         keepalive = 0, 
                         timeout = sotimeout,
                         proxies = proxies)
             break
         except Exception:
             randsleep = randint(30, 60)                
             time.sleep(randsleep)
     
     if (not os.path.exists(self.__uriDict['propPath'])):
         raise AgentException(Errors.DC_MISSING_PROP_FILE,
                         'Prop file (%s) does not exist' % (self.__uriDict['propPath']))
     
     if not PackageUtil.validateProp(self.__uriDict['propPath']):
         raise AgentException(Errors.DC_MISSING_PROP_FILE,
                         'Prop file (%s) failed validation' % (self.__uriDict['propPath']))
Developer: cronuspaas, Project: cronusagent, Lines: 25, Source: download_thread.py

Example 3: fetch

    def fetch (self):
        """Return value: Fetched file's full path.."""

        # import urlgrabber module
        try:
            import urlgrabber
        except ImportError:
            raise FetchError(_('Urlgrabber needs to be installed to run this command'))

        if not self.url.filename():
            raise FetchError(_('Filename error'))

        if not os.access(self.destdir, os.W_OK):
            raise FetchError(_('Access denied to write to destination directory: "%s"') % (self.destdir))

        if os.path.exists(self.archive_file) and not os.access(self.archive_file, os.W_OK):
            raise FetchError(_('Access denied to destination file: "%s"') % (self.archive_file))

        try:
            urlgrabber.urlgrab(self.url.get_uri(),
                           self.partial_file,
                           progress_obj = UIHandler(self.progress),
                           http_headers = self._get_http_headers(),
                           ftp_headers  = self._get_ftp_headers(),
                           proxies      = self._get_proxies(),
                           throttle     = self._get_bandwith_limit(),
                           reget        = self._test_range_support(),
                           user_agent   = 'PiSi Fetcher/' + pisi.__version__)
        except urlgrabber.grabber.URLGrabError, e:
            raise FetchError(_('Could not fetch destination file "%s": %s') % (self.archive_file, e))
Developer: dhirajkhatiwada1, Project: uludag, Lines: 30, Source: fetcher.py

Example 4: _batch_download

    def _batch_download(self, uris, local_path=None, throttle=0):
        """Downloads a package from specified uri. This is a W.I.P!!!

        Args:
            uris (list of strings) - Uris of the package to download.
            local_path (string) - Full path where the package is to be saved.
                Do not include a file name.
            throttle (int) - Number of kilobytes to throttle the bandwidth by.
                If throttle == 0, throttling is disabled.

        Returns:
            True if package downloaded successfully. False otherwise.
        """

        success = False

        if throttle != 0:
            throttle *= 1024

        for uri in uris:
            try:

                if local_path:

                    name = uri.split('/')[-1]
                    if '?' in name:
                        name = name.split('?')[0]

                    path = os.path.join(local_path, name)
                    # download the package to the computed local path
                    urlgrab(uri, filename=path, throttle=throttle)

                else:

                    urlgrab(uri, throttle=throttle)
            except Exception as e:
                logger.exception(e)
Developer: MiguelMoll, Project: vFense, Lines: 35, Source: packagegrabber.py

Example 5: run

   def run(self,force=False):
       """
       Download bootloader content for all of the latest bootloaders, since the user
       has chosen to not supply their own.  You may ask "why not get this from yum", though
       Fedora has no IA64 repo, for instance, and we also want this to be able to work on Debian and
       further do not want folks to have to install a cross compiler.  For those that don't like this approach
       they can still source their cross-arch bootloader content manually.
       """

       content_server = "http://mdehaan.fedorapeople.org/loaders"
       dest = "/var/lib/cobbler/loaders"

       files = (
          ( "%s/README" % content_server, "%s/README" % dest ),
          ( "%s/COPYING.elilo" % content_server, "%s/COPYING.elilo" % dest ),
          ( "%s/COPYING.yaboot" % content_server, "%s/COPYING.yaboot" % dest),
          ( "%s/COPYING.syslinux" % content_server, "%s/COPYING.syslinux" % dest),
          ( "%s/elilo-3.8-ia64.efi" % content_server, "%s/elilo-ia64.efi" % dest ),
          ( "%s/yaboot-1.3.14-12" % content_server, "%s/yaboot" % dest),
          ( "%s/pxelinux.0-3.61" % content_server, "%s/pxelinux.0" % dest),
          ( "%s/menu.c32-3.61" % content_server, "%s/menu.c32" % dest),
       )

       self.logger.info("downloading content required to netboot all arches")
       for f in files:
          src = f[0]
          dst = f[1]
          if os.path.exists(dst) and not force:
             self.logger.info("path %s already exists, not overwriting existing content, use --force if you wish to update" % dst)
             continue
          self.logger.info("downloading %s to %s" % (src,dst))
          urlgrabber.urlgrab(src,dst)

       return True
Developer: GunioRobot, Project: cobbler, Lines: 34, Source: action_dlcontent.py

Example 6: download_file

def download_file(uri, dl_path, throttle):
    if uri.startswith('https://api.github.com/'):
        # TODO: handle 200 and 302 response
        headers = (("Accept", "application/octet-stream"),)
        urlgrab(uri, filename=dl_path, throttle=throttle, http_headers=headers)

    else:
        urlgrab(uri, filename=dl_path, throttle=throttle)
Developer: akaasjager, Project: vFense, Lines: 8, Source: downloader.py

Example 7: downloadPackage

    def downloadPackage(self):

        # download the package
        urlgrabber.urlgrab(self.testPkgUri, self.localPkgName)
        urlgrabber.urlgrab(self.testPkgUri + '.prop', self.localPkgName + '.prop')

        LOG.debug('localpackagename = %s', self.localPkgName)
        assert os.path.exists(self.localPkgName + '.prop')
        assert os.path.exists(self.localPkgName)
Developer: cronuspaas, Project: cronusagent, Lines: 9, Source: test_package.py

Example 8: page_download

def page_download(page_url, folder):
    page = urllib2.urlopen(page_url)
    soup = BeautifulSoup(page)
    print len(soup.find_all("a", { "class" : "next" }))
    for src in soup.find_all('img'):
        if src.get('src').endswith(sfx):
            tgt_url = str(src.get('src').replace('small', 'big'))
            print "saving : " + tgt_url 
            tgt_name = os.path.basename(tgt_url)
            try:
                urlgrabber.urlgrab(tgt_url, "./" + folder + "/" + tgt_name, progress_obj=urlgrabber.progress.TextMeter())
            except urlgrabber.grabber.URLGrabError as detail:
                print "Error eccours: " + detail
Developer: donie, Project: playground, Lines: 13, Source: gh_downloader.py

Example 9: fetch_image_files

def fetch_image_files(layer, opts):
    if opts.layer:
        path = str(opts.layer)
        if not opts.test and not os.path.isdir(path):
            os.makedirs(path)
    else:
        path = "."
    for image in layer["images"]:
        filetype = image["url"].split(".")[-1]
        target = os.path.join(path, image["hash"] + "." + filetype)
        if opts.test:
            print >>sys.stderr, image["url"], "->", target
        else:
            meter = urlgrabber.progress.text_progress_meter()
            urlgrabber.urlgrab(image["url"], target, progress_obj=meter)
Developer: crschmidt, Project: oam, Lines: 15, Source: oam-fetch.py

Example 10: updateLocalDb

 def updateLocalDb():
     try:
         if urlgrabber.urlgrab(self.remote_db, self.local_db) == self.local_db:
             updateLocalSum()
             return True
     except urlgrabber.grabber.URLGrabError:
         return False
Developer: Pardus-Linux, Project: appinfo, Lines: 7, Source: client.py

Example 11: grab

def grab(url, filename, timeout=120, retry=5, proxy=None, ftpmode=False):
    print "Grabbing", url
    def grab_fail_callback(data):
        # Only print debug here when non fatal retries, debug in other cases
        # is already printed
        if (data.exception.errno in retrycodes) and (data.tries != data.retry):
            print "grabbing retry %d/%d, exception %s"%(
                data.tries, data.retry, data.exception)
    try:
        retrycodes = urlgrabber.grabber.URLGrabberOptions().retrycodes
        if 12 not in retrycodes:
            retrycodes.append(12)
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        downloaded_file = urlgrabber.urlgrab(
            url, filename, timeout=timeout, retry=retry, retrycodes=retrycodes,
            progress_obj=SimpleProgress(), failure_callback=grab_fail_callback,
            copy_local=True, proxies=proxy, ftp_disable_epsv=ftpmode)
        if not downloaded_file:
            return False
    except urlgrabber.grabber.URLGrabError as e:
        warn('URLGrabError %i: %s' % (e.errno, e.strerror))
        if os.path.exists(filename):
            os.unlink(filename)
        return False
    return True
Developer: oe-lite-bananapi-r1, Project: core, Lines: 26, Source: url.py

Example 12: fetch_jetty

 def fetch_jetty(self):
     """Download the requested version of Jetty"""
     if path.exists(self.home):
         return
     url = self.node.config.get('jetty','REPO') + self.version + "/jetty-distribution-" + self.version + ".tar.gz"
     if not path.exists(self.cachedir):
         os.makedirs(self.cachedir)
     f = tempfile.mktemp(prefix='jetty-' + self.version + '-', suffix='.tar.gz')
     try:
         print("Downloading Jetty from " + url)
         meter = urlgrabber.progress.TextMeter()
         urlgrabber.urlgrab(url, filename=f, progress_obj=meter)
         subprocess.check_call(["tar", "-x", "-C", self.cachedir, "-f", f])
     finally:
         os.remove(f)
     os.rename(path.join(self.cachedir, 'jetty-distribution-' + self.version), self.home)
Developer: nla, Project: jvmctl, Lines: 16, Source: jvmctl.py

Example 13: __init__

 def __init__(self):
     data = StringIO.StringIO(urlgrabber.urlread("http://itunes.com/version"))
     stream = gzip.GzipFile(fileobj=data)
     data = stream.read()
     updates = plistlib.readPlistFromString(data)
     devs = self.findPods()
     for (dev, name, family, firmware) in devs:
         if not family:
             family, firmware = self.getIPodData(dev)
         print "Found %s with family %s and firmware %s" % (name, family, firmware)
         if updates["iPodSoftwareVersions"].has_key(unicode(family)):
             uri = updates["iPodSoftwareVersions"][unicode(family)]["FirmwareURL"]
             print "Latest firmware: %s" % uri
             print "Fetching firmware..."
             path = urlgrabber.urlgrab(
                 uri, progress_obj=urlgrabber.progress.text_progress_meter(), reget="check_timestamp"
             )
             print "Extracting firmware..."
             zf = zipfile.ZipFile(path)
             for name in zf.namelist():
                 if name[:8] == "Firmware":
                     print "Firmware found."
                     outfile = open("Firmware", "wb")
                     outfile.write(zf.read(name))
                     outfile.close()
                     infile = open("Firmware", "rb")
                     outfile = open(dev, "wb")
                     # FIXME: do the following in pure python?
                     print "Making backup..."
                     commands.getoutput("dd if=%s of=Backup" % dev)
                     print "Uploading firmware..."
                     commands.getoutput("dd if=Firmware of=%s" % dev)
         print "Done."
Developer: cberetta, Project: ipod-update, Lines: 33, Source: ipod-update.py

Example 14: getRemoteURL

def getRemoteURL(url):
    logger.info('downloading %s' % url)
    start = time.time()
    try: 
        fileName = urlgrabber.urlgrab(url, config.localOSMPath)
        fileSize = os.path.getsize(fileName)
    except Exception, e:
        logger.warning('urlgrabber: %s' % e.strerror)
        return(None)
Developer: gangele397, Project: jaunt, Lines: 9, Source: fetcher.py

Example 15: archive_downloader

def archive_downloader(i):
    list_name = i[0]
    year = i[1]
    month = i[2]
    if not list_name or not year or not month:
        return
    basename = "{0}-{1}.txt.gz".format(year, month)
    filename = "http://lists.fedoraproject.org/pipermail/{0}/{1}".format(list_name, basename)
    try:
        urlgrabber.urlgrab(filename)
        pos = str(months.index(month) + 1)
        if len(pos) == 1:
            pos = "0{0}".format(pos)
        newname = "{0}-{1}-{2}-{3}.txt".format(list_name, year, pos, month)
        with open(newname, "w") as f:
            f.write(gzip.open(basename).read())
        print "== {0} downloaded ==".format(filename)
    except urlgrabber.grabber.URLGrabError:
        pass
Developer: pypingou, Project: mongomail, Lines: 19, Source: get_mbox.py


Note: The urlgrabber.urlgrab examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use are governed by each project's license. Do not reproduce without permission.