当前位置: 首页>>代码示例>>Python>>正文


Python cssutils.CSSParser类代码示例

本文整理汇总了Python中cssutils.CSSParser的典型用法代码示例。如果您正苦于以下问题:Python CSSParser类的具体用法?Python CSSParser怎么用?Python CSSParser使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


在下文中一共展示了CSSParser类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: test_finish

    def test_finish(self):
        """
        L{StylesheetRewritingRequestWrapper.finish} causes all written bytes to
        be translated with C{_replace} written to the wrapped request.
        """
        template = """
            .foo {
                background-image: url(%s)
            }
        """
        original = template % ("/Foo/bar",)
        expected = template % ("/bar/Foo/bar",)

        request = FakeRequest()
        rootLookup = {request: URL.fromString('/bar/')}.get
        wrapper = website.StylesheetRewritingRequestWrapper(
            request, [], rootLookup)
        wrapper.write(original)
        wrapper.finish()
        # Round-trip both stylesheets through the CSS parser so whitespace
        # differences do not affect the comparison.
        parser = CSSParser()
        self.assertEqual(
            parser.parseString(request.accumulator).cssText,
            parser.parseString(expected).cssText)
开发者ID:twisted,项目名称:mantissa,代码行数:25,代码来源:test_website.py

示例2: beautify_text

def beautify_text(raw, syntax):
    """
    Pretty-print the source text ``raw``.

    ``syntax`` selects the formatter: ``'xml'`` and ``'css'`` get dedicated
    handling; anything else is treated as HTML.  Returns the beautified text.
    """
    from lxml import etree
    from calibre.ebooks.oeb.polish.parsing import parse
    from calibre.ebooks.oeb.polish.pretty import pretty_xml_tree, pretty_html_tree
    from calibre.ebooks.chardet import strip_encoding_declarations
    if syntax == 'css':
        import logging
        from calibre.ebooks.oeb.base import serialize, _css_logger
        from calibre.ebooks.oeb.polish.utils import setup_cssutils_serialization
        from cssutils import CSSParser, log
        setup_cssutils_serialization(tprefs['editor_tab_stop_width'])
        # Quieten cssutils: warnings only, never raise on parse problems.
        log.setLevel(logging.WARN)
        log.raiseExceptions = False
        parser = CSSParser(loglevel=logging.WARNING,
                           # We dont care about @import rules
                           fetcher=lambda x: (None, None), log=_css_logger)
        sheet = parser.parseString(raw, href='<string>', validate=False)
        return serialize(sheet, 'text/css')
    if syntax == 'xml':
        root = etree.fromstring(strip_encoding_declarations(raw))
        pretty_xml_tree(root)
    else:
        root = parse(raw, line_numbers=False)
        pretty_html_tree(None, root)
    return etree.tostring(root, encoding=unicode)
开发者ID:AtulKumar2,项目名称:calibre,代码行数:25,代码来源:view.py

示例3: run

    def run(self):
        """
        Rewrite the URLs in the input CSS file to their CDN equivalents and
        write the result to the output file.

        Returns the path of the resulting file: the unchanged input file when
        the stylesheet contains no URLs, otherwise the rewritten output file.

        Raises DocumentRootAndBasePathRequiredException when the source did
        not configure both ``document_root`` and ``base_path``, and
        RequestToRequeueException when a referenced local file has not been
        synced yet.
        """
        # Step 0: ensure that the document_root and base_path variables are
        # set. If the file that's being processed was inside a source that has
        # either one or both not set, then this processor can't run.
        if self.document_root is None or self.base_path is None:
            raise DocumentRootAndBasePathRequiredException

        # We don't rename the file, so we can use the default output file.

        parser = CSSParser(log=None, loglevel=logging.CRITICAL)
        sheet = parser.parseFile(self.input_file)

        # Step 1: if the stylesheet references no URLs at all, there is
        # nothing to rewrite and the input file can be returned untouched.
        if not any(True for _ in getUrls(sheet)):
            return self.input_file

        # Step 2: resolve the relative URLs to absolute paths.
        replaceUrls(sheet, self.resolveToAbsolutePath)

        # Step 3: verify that each of these files has been synced.
        synced_files_db = urljoin(sys.path[0] + os.sep, SYNCED_FILES_DB)
        self.dbcon = sqlite3.connect(synced_files_db)
        self.dbcon.text_factory = unicode  # This is the default, but we set it explicitly, just to be sure.
        self.dbcur = self.dbcon.cursor()
        for urlstring in getUrls(sheet):
            # Skip absolute URLs.
            if urlstring.startswith("http://") or urlstring.startswith("https://"):
                continue

            # Skip broken references in the CSS file. This would otherwise
            # prevent this CSS file from ever passing through this processor.
            if not os.path.exists(urlstring):
                continue

            # A missing row means the file has not been synced yet; requeue
            # so this processor can retry later.
            self.dbcur.execute("SELECT url FROM synced_files WHERE input_file=?", (urlstring,))
            result = self.dbcur.fetchone()

            if result is None:  # identity check, not '== None'
                raise RequestToRequeueException(
                    "The file '%s' has not yet been synced to the server '%s'" % (urlstring, self.process_for_server)
                )

        # Step 4: resolve the absolute paths to CDN URLs.
        replaceUrls(sheet, self.resolveToCDNURL)

        # Step 5: write the updated CSS to the output file; the context
        # manager guarantees the handle is closed even if the write fails.
        with open(self.output_file, "w") as f:
            f.write(sheet.cssText)

        return self.output_file
开发者ID:georgejipa,项目名称:fileconveyor,代码行数:60,代码来源:link_updater.py

示例4: validate_css

def validate_css(string, generate_https_urls):
    """
    Validate user-supplied CSS and return a (stylesheet, ValidationReport)
    pair.

    NOTE(review): this excerpt ends inside the ``except`` handler —
    ``generate_https_urls`` and ``parsed`` are presumably used by code beyond
    this view; confirm against the full source.
    """
    # Parser raises DOMException on syntax errors instead of logging them.
    p = CSSParser(raiseExceptions=True)

    # Empty or whitespace-only input trivially validates.
    if not string or only_whitespace.match(string):
        return ("", ValidationReport())

    report = ValidationReport(string)

    # avoid a very expensive parse
    max_size_kb = 100
    if len(string) > max_size_kb * 1024:
        report.append(ValidationError((msgs["too_big"] % dict(max_size=max_size_kb))))
        return ("", report)

    # Reject backslashes outright (they enable CSS escape sequences).
    if "\\" in string:
        report.append(ValidationError(_("if you need backslashes, you're doing it wrong")))

    try:
        parsed = p.parseString(string)
    except DOMException, e:
        # yuck; xml.dom.DOMException can't give us line-information
        # directly, so we have to parse its error message string to
        # get it
        line = None
        line_match = error_message_extract_re.match(e.message)
        if line_match:
            line = line_match.group(1)
            if line:
                line = int(line)
        error_message = msgs["syntax_error"] % dict(syntaxerror=e.message)
        report.append(ValidationError(error_message, e, line))
        return (None, report)
开发者ID:tolgaek,项目名称:reddit,代码行数:32,代码来源:cssfilter.py

示例5: create_importer

def create_importer(page):
    """
    Fetch ``page.url``, scan it for stylesheet <link> and inline <style>
    tags, and store an Importer entity holding the external stylesheet URLs
    plus any inline style text, then queue the import task for the page.
    """
    importer = Importer(page=page, style='')
    resp = urlfetch.fetch(page.url, deadline=10)
    if resp.status_code == 200:
        soup = BeautifulSoup(resp.content)
        parser = CSSParser()
        for tag in soup.findAll(re.compile(r'^(link|style)$')):
            if tag.name == 'link':
                # Only stylesheet links hosted on a different domain than the
                # current request are queued for later import.
                if tag.get('href', None) and tag.get('rel', 'stylesheet').lower() == 'stylesheet':
                    url = urljoin(page.url, tag['href'])
                    if urlparse(url).netloc != urlparse(request.url).netloc:
                        importer.urls.append(url)
            elif tag.name == 'style':
                media = tag.get('media', None)
                # NOTE(review): ``url`` is only bound inside the 'link'
                # branch above; a <style> tag encountered before any <link>
                # would raise NameError here — confirm intended href.
                sheet = parser.parseString(''.join(tag.contents).strip('\n'), href=url)
                style = sheet.cssText
                # Wrap in the original @media block, if any, so the scoping
                # of the inline style is preserved.
                if media:
                    style = '@media %s {\n%s\n}' % (media, style)
                style = '/* Imported directly from %s */\n%s\n' % (page.url, style)
                importer.style += style
        # Patch around AppEngine's frame inspection
        del parser

        importer.put()
        queue_import(page)
开发者ID:naiyt,项目名称:WebPutty,代码行数:25,代码来源:tasks.py

示例6: do_import

def do_import():
    """
    Task handler that incrementally imports external stylesheets for a page:
    each invocation processes at most one pending URL from the page's
    Importer entity.

    NOTE(review): this excerpt is cut off — the ``finally`` suite and the
    rest of the function are not visible here.
    """
    page = Page.get(request.form.get('page_key', ''))
    if not page or page.import_state != IMPORTING:
        return 'NO_IMPORTER' # We're done
    importer = Importer.gql('WHERE page=:1', page.key()).get()
    if not importer:
        # This requires a request to fetch the page and parse the URLs.
        # It also enqueues the next run.
        create_importer(page)
        return 'CREATED'
    if importer.urls:
        # Pop exactly one pending URL per run to stay within task limits.
        url = importer.urls.pop(0)
        parser = None
        try:
            resp = urlfetch.fetch(url, deadline=10)
            if resp.status_code == 200:
                parser = CSSParser()
                sheet = parser.parseString(resp.content, href=url)
                style = sheet.cssText
                importer.style += '\n\n/* Imported from %s */\n%s' % (url, style)
            else:
                raise Exception('Error fetching %s' % url)
        except Exception, e:
            # Record the failure on the importer and log full details; the
            # import continues with the remaining URLs on later runs.
            import traceback
            importer.errors.append('Error importing %s' % url)
            logging.error('Error importing for Page %s from %s:\n%s\n%s', page.key().id(), url, e, traceback.format_exc())
        finally:
开发者ID:naiyt,项目名称:WebPutty,代码行数:27,代码来源:tasks.py

示例7: validate_css

def validate_css(string):
    """
    Validate user-supplied CSS and return a (stylesheet, ValidationReport)
    pair.

    NOTE(review): this excerpt ends inside the ``except`` handler — the
    success path using ``parsed`` presumably continues beyond this view.
    """
    # Parser raises DOMException on syntax errors instead of logging them.
    p = CSSParser(raiseExceptions = True)

    # Empty or whitespace-only input trivially validates.
    if not string or only_whitespace.match(string):
        return ('',ValidationReport())

    report = ValidationReport(string)
    
    # avoid a very expensive parse
    max_size_kb = 100;
    if len(string) > max_size_kb * 1024:
        report.append(ValidationError((msgs['too_big']
                                       % dict (max_size = max_size_kb))))
        return (string, report)

    try:
        parsed = p.parseString(string)
    except DOMException,e:
        # yuck; xml.dom.DOMException can't give us line-information
        # directly, so we have to parse its error message string to
        # get it
        line = None
        line_match = error_message_extract_re.match(e.message)
        if line_match:
            line = line_match.group(1)
            if line:
                line = int(line)
        error_message=  (msgs['syntax_error']
                         % dict(syntaxerror = e.message))
        report.append(ValidationError(error_message,e,line))
        return (None,report)
开发者ID:vin,项目名称:reddit,代码行数:31,代码来源:cssfilter.py

示例8: main

def main():
    css = u'''
    /* some umlauts äöü and EURO sign € */
    a:before {
       content: "ä";
        }'''

    p = CSSParser()
    sheet = p.parseString(css)
    
    print """cssText in different encodings, depending on the console some
     chars may look broken but are actually not"""
    print 
    
    sheet.encoding = 'ascii'
    print sheet.cssText
    print
    
    sheet.encoding = 'iso-8859-1'
    print sheet.cssText
    print
    
    sheet.encoding = 'iso-8859-15'
    print sheet.cssText
    print
    
    sheet.encoding = 'utf-8'
    print sheet.cssText
    print
    
    # results in default UTF-8 encoding without @charset rule
    sheet.encoding = None
    print sheet.cssText
开发者ID:Western-Toronto,项目名称:truly_native_john,代码行数:33,代码来源:cssencodings.py

示例9: parse_css

 def parse_css(self, data, fname):
     """
     Decode, preprocess and parse raw CSS bytes into a cssutils stylesheet,
     using ``fname`` as the sheet's href.
     """
     from cssutils import CSSParser, log
     # Quieten cssutils: log warnings only, never raise on parse problems.
     log.setLevel(logging.WARN)
     log.raiseExceptions = False
     text = self.css_preprocessor(self.decode(data))
     css_parser = CSSParser(loglevel=logging.WARNING,
                            # We dont care about @import rules
                            fetcher=lambda x: (None, None), log=_css_logger)
     return css_parser.parseString(text, href=fname, validate=False)
开发者ID:michaelbrawn,项目名称:calibre,代码行数:11,代码来源:container.py

示例10: finish

 def finish(self):
     """
     Parse the buffered response body, rewrite its URLs, write the result to
     the wrapped request, and finish the wrapped request.
     """
     sheet = CSSParser().parseString(''.join(self._buffer))
     sheet.replaceUrls(self._replace)
     self.request.write(sheet.cssText)
     return self.request.finish()
开发者ID:rcarmo,项目名称:divmod.org,代码行数:11,代码来源:website.py

示例11: normalize_filter_css

def normalize_filter_css(props):
    """
    Return the set of property names in ``props`` expanded with the longhand
    properties implied by any shorthand entries (per ``SHORTHAND_DEFAULTS``).
    """
    import logging
    expanded = set(props)
    parser = CSSParser(loglevel=logging.CRITICAL, validate=False)
    for prop in props:
        normalizer = normalizers.get(prop, None)
        if normalizer is None or prop not in SHORTHAND_DEFAULTS:
            continue
        # Parse the shorthand with its default value so the normalizer can
        # enumerate the longhand names it expands to.
        decl = parser.parseStyle('%s: %s' % (prop, SHORTHAND_DEFAULTS[prop]))
        cssvalue = decl.getPropertyCSSValue(decl.item(0))
        expanded |= set(normalizer(prop, cssvalue))
    return expanded
开发者ID:089git,项目名称:calibre,代码行数:12,代码来源:normalize_css.py

示例12: _apply_to_style_uri

 def _apply_to_style_uri(style_text, func):
     """
     Apply ``func`` to every URI value in the inline CSS ``style_text``.
     Returns the rewritten style string when any URI changed, otherwise the
     original text untouched.
     """
     style = CSSParser().parseStyle(style_text)
     changed = False
     for prop in style.getProperties(all=True):
         for value in prop.propertyValue:
             if value.type != 'URI':
                 continue
             replacement = func(value.uri, element=value)
             if replacement != value.uri:
                 value.uri = replacement
                 changed = True
     if changed:
         return to_unicode(style.cssText, 'utf-8')
     return style_text
开发者ID:jesserobertson,项目名称:python-emails,代码行数:15,代码来源:transformer.py

示例13: __init__

    def __init__(self, container, do_embed=False):
        """
        Set up a QtWebKit rendering pipeline for collecting font-usage
        statistics from every spine item of ``container``.

        :param container: book container whose spine items will be rendered;
            also supplies the logger.
        :param do_embed: stored flag — presumably controls font embedding in
            later processing; confirm against the collect/render code.
        :raises Exception: if the rendering event loop exits with status 1.
        """
        self.container = container
        self.log = self.logger = container.log
        self.do_embed = do_embed
        # Qt must be initialised before any QWebView/QEventLoop is created.
        must_use_qt()
        self.parser = CSSParser(loglevel=logging.CRITICAL, log=logging.getLogger('calibre.css'))
        # Matches a leading run of punctuation (brackets, quotes, other).
        # NOTE(review): \p{Ps} appears twice in the class — harmless but
        # possibly a typo for a different category; confirm.
        self.first_letter_pat = regex.compile(r'^[\p{Ps}\p{Ps}\p{Pe}\p{Pi}\p{Pf}\p{Po}]+', regex.VERSION1 | regex.UNICODE)

        self.loop = QEventLoop()
        self.view = QWebView()
        self.page = Page(self.log)
        self.view.setPage(self.page)
        self.page.setViewportSize(QSize(1200, 1600))

        # Gather statistics each time a spine item finishes loading.
        self.view.loadFinished.connect(self.collect,
                type=Qt.QueuedConnection)

        # Per-book accumulators filled in as items are rendered.
        self.render_queue = list(container.spine_items)
        self.font_stats = {}
        self.font_usage_map = {}
        self.font_spec_map = {}
        self.font_rule_map = {}
        self.all_font_rules = {}

        # Kick off rendering once the event loop starts running.
        QTimer.singleShot(0, self.render_book)

        if self.loop.exec_() == 1:
            raise Exception('Failed to gather statistics from book, see log for details')
开发者ID:Aliminator666,项目名称:calibre,代码行数:28,代码来源:stats.py

示例14: parse_css

 def parse_css(self, data, fname='<string>', is_declaration=False):
     """
     Parse CSS source into a cssutils object: a full stylesheet by default,
     or a single style declaration when ``is_declaration`` is True.
     ``fname`` is used as the stylesheet's href.
     """
     from cssutils import CSSParser, log
     # Quieten cssutils: log warnings only, never raise on parse problems.
     log.setLevel(logging.WARN)
     log.raiseExceptions = False
     if isinstance(data, bytes):
         data = self.decode(data)
     if not self.tweak_mode:
         data = self.css_preprocessor(data)
     css_parser = CSSParser(loglevel=logging.WARNING,
                            # We dont care about @import rules
                            fetcher=lambda x: (None, None), log=_css_logger)
     if is_declaration:
         return css_parser.parseStyle(data, validate=False)
     return css_parser.parseString(data, href=fname, validate=False)
开发者ID:089git,项目名称:calibre,代码行数:16,代码来源:container.py

示例15: __init__

class Parser:
    """Count colour usage across CSS files using cssutils."""

    def __init__(self):
        self.css_parser = CSSParser()

    def get_colors_from_file(self, f):
        """
        Return a mapping of ``color`` property value -> occurrence count for
        the stylesheet file ``f`` (decoded as UTF-8).
        """
        sheet = self.css_parser.parseFile(f, 'utf-8')
        counts = {}
        for rule in sheet:
            if rule.type != rule.STYLE_RULE:
                continue
            for prop in rule.style:
                if prop.name == 'color':
                    counts[prop.value] = counts.get(prop.value, 0) + 1
        return counts

    def read_all_css_files_in_dir(self):
        """
        Parse every ``*.css`` file in the current working directory and
        return a list of per-file colour-count dictionaries.
        """
        return [self.get_colors_from_file(name) for name in glob.glob('*.css')]
开发者ID:johandersson,项目名称:CssKoll,代码行数:26,代码来源:Parser.py


注:本文中的cssutils.CSSParser类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。