

Python _mapping.LEXERS Code Examples

This article collects typical usage examples of pygments.lexers._mapping.LEXERS in Python. If you are wondering what LEXERS is for, how it is used, or what working code that uses it looks like, the hand-picked examples below should help.


Fifteen code examples of LEXERS are shown below, ordered roughly by popularity.
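
As a quick orientation before the individual examples: LEXERS maps lexer class names to five-element tuples of (module name, lexer name, aliases, filename patterns, mimetypes), which is exactly the structure the snippets below unpack. A minimal sketch, assuming only a standard Pygments installation, that prints the first few entries:

from pygments.lexers._mapping import LEXERS

# Each value is a 5-tuple: (module name, lexer name, aliases, filename patterns, mimetypes).
for classname, (module, name, aliases, patterns, mimetypes) in sorted(LEXERS.items())[:5]:
    print(classname, '->', name, aliases, patterns)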

Example 1: document_lexers

    def document_lexers(self):
        from pygments.lexers._mapping import LEXERS
        out = []
        modules = {}
        moduledocstrings = {}
        for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]):
            module = data[0]
            mod = __import__(module, None, None, [classname])
            self.filenames.add(mod.__file__)
            cls = getattr(mod, classname)
            if not cls.__doc__:
                print("Warning: %s does not have a docstring." % classname)
            docstring = cls.__doc__
            if isinstance(docstring, bytes):
                docstring = docstring.decode('utf8')
            modules.setdefault(module, []).append((
                classname,
                ', '.join(data[2]) or 'None',
                ', '.join(data[3]).replace('*', '\\*').replace('_', '\\_') or 'None',
                ', '.join(data[4]) or 'None',
                docstring))
            if module not in moduledocstrings:
                moddoc = mod.__doc__
                if isinstance(moddoc, bytes):
                    moddoc = moddoc.decode('utf8')
                moduledocstrings[module] = moddoc

        for module, lexers in sorted(modules.items(), key=lambda x: x[0]):
            heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')
            out.append(MODULEDOC % (module, heading, '-'*len(heading)))
            for data in lexers:
                out.append(LEXERDOC % data)

        return ''.join(out)
Author: 2015E8014661092, Project: jinjaysnow.github.io, Lines: 34, Source: sphinxext.py

Example 2: get_lexer_for_filename

def get_lexer_for_filename(_fn, code=None, **options):
    """
    Get a lexer for a filename.  If multiple lexers match the filename
    pattern, use ``analyse_text()`` to figure out which one is more
    appropriate.
    """
    matches = []
    fn = basename(_fn)
    for modname, name, _, filenames, _ in LEXERS.itervalues():
        for filename in filenames:
            if fnmatch.fnmatch(fn, filename):
                if name not in _lexer_cache:
                    _load_lexers(modname)
                matches.append(_lexer_cache[name])
    for cls in find_plugin_lexers():
        for filename in cls.filenames:
            if fnmatch.fnmatch(fn, filename):
                matches.append(cls)

    def get_rating(cls):
        # The class _always_ defines analyse_text because it's included in
        # the Lexer class.  The default implementation returns None which
        # gets turned into 0.0.  Run scripts/detect_missing_analyse_text.py
        # to find lexers which need it overridden.
        d = cls.analyse_text(code)
        #print "Got %r from %r" % (d, cls)
        return d

    if code:
        matches.sort(key=get_rating)
    if matches:
        #print "Possible lexers, after sort:", matches
        return matches[-1](**options)
    raise ClassNotFound('no lexer for filename %r found' % _fn)
Author: mbialon, Project: yashapp, Lines: 34, Source: __init__.py
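
For orientation, this function is also part of the public pygments.lexers API. A hypothetical call (the file name and code string below are made up for illustration) might look like:

from pygments.lexers import get_lexer_for_filename

# Hypothetical usage: pass the file name plus, optionally, the code itself so that
# analyse_text() can break ties between lexers matching the same filename pattern.
lexer = get_lexer_for_filename('example.py', code='import os\nprint(os.name)\n')
print(lexer.name)  # e.g. "Python" -- the highest-rated match wins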

Example 3: __getattr__

    def __getattr__(self, name):
        info = LEXERS.get(name)
        if info:
            _load_lexers(info[0])
            cls = _lexer_cache[info[1]]
            setattr(self, name, cls)
            return cls
        raise AttributeError(name)
Author: CindyLulu, Project: SublimeHighlight, Lines: 8, Source: __init__.py

Example 4: get_all_lexers

def get_all_lexers():
    from pygments.lexers._mapping import LEXERS
    from pygments.plugin import find_plugin_lexers

    for item in LEXERS.itervalues():
        yield item[1:]
    for cls in find_plugin_lexers():
        yield cls.name, cls.aliases, cls.filenames, cls.mimetypes
Author: nyuhuhuu, Project: trachacks, Lines: 8, Source: __init__.py

Example 5: get_language

def get_language(syntax, ext):
    """
    Look up the Pygments language name, first by syntax name and then by
    file extension.
    """
    if len(ext) == 0: ext = 'none'
    if ext[0] == '.': ext = '*' + ext
    if ext[0] != '*': ext = '*.' + ext
    s = os.path.basename(os.path.join(sublime.packages_path(),syntax))
    if s[-len('.tmLanguage'):] == '.tmLanguage':
        s = s[:-len('.tmLanguage')]
        for module_name, name, aliases, exts, mine in LEXERS.itervalues():
            if s.lower() in aliases:
                return s.lower()
        for module_name, name, aliases, exts, mine in LEXERS.itervalues():
            if ext in exts:
                return aliases[0]
    return None
Author: Jedius, Project: sublime-jed, Lines: 17, Source: 2pdf.py

Example 6: lexers

def lexers():
    lexers = []
    for key in LEXERS.keys():
        lexer = LEXERS[key]
        lexers.append( (lexer[1], key) )
    
    lexers.sort(lambda a, b: cmp(a[1], b[1]))
    
    return lexers
Author: markchadwick, Project: syntaxing, Lines: 9, Source: __init__.py

Example 7: get_all_lexers

def get_all_lexers():
    """
    Return a generator of tuples in the form ``(name, aliases,
    filenames, mimetypes)`` of all known lexers.
    """
    for item in LEXERS.itervalues():
        yield item[1:]
    for lexer in find_plugin_lexers():
        yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
Author: CindyLulu, Project: SublimeHighlight, Lines: 9, Source: __init__.py

Example 8: _iter_lexerclasses

def _iter_lexerclasses():
    """
    Return an iterator over all lexer classes.
    """
    for module_name, name, _, _, _ in LEXERS.itervalues():
        if name not in _lexer_cache:
            _load_lexers(module_name)
        yield _lexer_cache[name]
    for lexer in find_plugin_lexers():
        yield lexer
Author: adambernier, Project: gae_exp, Lines: 10, Source: __init__.py

Example 9: get_lexer_for_mimetype

def get_lexer_for_mimetype(_mime, **options):
    """
    Get a lexer for a mimetype.
    """
    for modname, name, _, _, mimetypes in LEXERS.itervalues():
        if _mime in mimetypes:
            if name not in _lexer_cache:
                _load_lexers(modname)
            return _lexer_cache[name](**options)
    for cls in find_plugin_lexers():
        if _mime in cls.mimetypes:
            return cls(**options)
    raise ClassNotFound('no lexer for mimetype %r found' % _mime)
Author: CindyLulu, Project: SublimeHighlight, Lines: 13, Source: __init__.py

Example 10: get_lexer_by_name

def get_lexer_by_name(_alias, **options):
    """
    Get a lexer by an alias.
    """
    # lookup builtin lexers
    for module_name, name, aliases, _, _ in LEXERS.itervalues():
        if _alias in aliases:
            if name not in _lexer_cache:
                _load_lexers(module_name)
            return _lexer_cache[name](**options)
    # continue with lexers from setuptools entrypoints
    for cls in find_plugin_lexers():
        if _alias in cls.aliases:
            return cls(**options)
    raise ClassNotFound('no lexer for alias %r found' % _alias)
Author: CindyLulu, Project: SublimeHighlight, Lines: 15, Source: __init__.py
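
A similarly hedged usage sketch for the alias lookup above ('python' is a standard Pygments alias and stripall is a standard lexer option):

from pygments.lexers import get_lexer_by_name

# Hypothetical usage: look up a lexer by one of its aliases and pass a lexer option through.
lexer = get_lexer_by_name('python', stripall=True)
print(lexer.name)  # "Python"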

Example 11: find_lexer_class

def find_lexer_class(name):
    """
    Lookup a lexer class by name. Return None if not found.
    """
    if name in _lexer_cache:
        return _lexer_cache[name]
    # lookup builtin lexers
    for module_name, lname, aliases, _, _ in LEXERS.itervalues():
        if name == lname:
            _load_lexers(module_name)
            return _lexer_cache[name]
    # continue with lexers from setuptools entrypoints
    for cls in find_plugin_lexers():
        if cls.name == name:
            return cls
Author: CindyLulu, Project: SublimeHighlight, Lines: 15, Source: __init__.py

Example 12: get_lexer_for_filename

def get_lexer_for_filename(_fn, **options):
    """
    Get a lexer for a filename.
    """
    fn = basename(_fn)
    for modname, name, _, filenames, _ in LEXERS.itervalues():
        for filename in filenames:
            if fnmatch.fnmatch(fn, filename):
                if name not in _lexer_cache:
                    _load_lexers(modname)
                return _lexer_cache[name](**options)
    for cls in find_plugin_lexers():
        for filename in cls.filenames:
            if fnmatch.fnmatch(fn, filename):
                return cls(**options)
    raise ClassNotFound('no lexer for filename %r found' % _fn)
Author: Khaos, Project: pygments.wlwriter, Lines: 16, Source: __init__.py

Example 13: get_lexer_for_filename

def get_lexer_for_filename(_fn, code=None, **options):
    """
    Get a lexer for a filename.  If multiple lexers match the filename
    pattern, use ``analyse_text()`` to figure out which one is more
    appropriate.
    """
    matches = []
    fn = basename(_fn)
    for modname, name, _, filenames, _ in LEXERS.itervalues():
        for filename in filenames:
            if fnmatch.fnmatch(fn, filename):
                if name not in _lexer_cache:
                    _load_lexers(modname)
                matches.append((_lexer_cache[name], filename))
    for cls in find_plugin_lexers():
        for filename in cls.filenames:
            if fnmatch.fnmatch(fn, filename):
                matches.append((cls, filename))

    if sys.version_info > (3,) and isinstance(code, bytes):
        # decode it, since all analyse_text functions expect unicode
        code = code.decode('latin1')

    def get_rating(info):
        cls, filename = info
        # explicit patterns get a bonus
        bonus = '*' not in filename and 0.5 or 0
        # The class _always_ defines analyse_text because it's included in
        # the Lexer class.  The default implementation returns None which
        # gets turned into 0.0.  Run scripts/detect_missing_analyse_text.py
        # to find lexers which need it overridden.
        if code:
            return cls.analyse_text(code) + bonus
        return cls.priority + bonus

    if matches:
        matches.sort(key=get_rating)
        #print "Possible lexers, after sort:", matches
        return matches[-1][0](**options)
    raise ClassNotFound('no lexer for filename %r found' % _fn)
Author: CindyLulu, Project: SublimeHighlight, Lines: 40, Source: __init__.py

Example 14: _load_lexers

"""
    pygments.lexers
    ~~~~~~~~~~~~~~~

    Pygments lexers.

    :copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import sys
import types
import fnmatch
from os.path import basename

from pygments.lexers._mapping import LEXERS
from pygments.plugin import find_plugin_lexers
from pygments.util import ClassNotFound, bytes


__all__ = ["get_lexer_by_name", "get_lexer_for_filename", "find_lexer_class", "guess_lexer"] + LEXERS.keys()

_lexer_cache = {}


def _load_lexers(module_name):
    """
    Load a lexer (and all others in the module too).
    """
    mod = __import__(module_name, None, None, ["__all__"])
    for lexer_name in mod.__all__:
        cls = getattr(mod, lexer_name)
        _lexer_cache[cls.name] = cls


Author: mmagnus, Project: SyntaxHighlight, Lines: 30, Source: __init__.py

Example 15: get

    def get(self):
        path = "langs.html"
        langs = [(l[1], l[2]) for l in LEXERS.values()]
        langs.sort(key=lambda x: x[0].lower())
        self.response.out.write(template.render(path,
                                                {'langs': langs}))
Author: emirotin, Project: pygmentizer, Lines: 6, Source: pygmentizer.py


Note: The pygments.lexers._mapping.LEXERS examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective authors; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not republish without permission.