

Python lex.lex method code examples

This article collects typical usage examples of the Python ply.lex.lex method. If you are wondering what exactly lex.lex does, how to call it, or what real-world uses of it look like, the curated code examples below may help. You can also explore further usage examples from the module it belongs to, ply.lex.


The following presents 15 code examples of the lex.lex method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
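
Before turning to the examples, a quick orientation: lex.lex() builds a lexer from the token definitions it finds in the calling module (or in a module or object passed via the module=/object= arguments). The following minimal sketch shows that basic pattern; the token names and rules are illustrative and not taken from any of the projects below.

import ply.lex as lex

tokens = ('NUMBER', 'PLUS')        # token names the lexer will produce

t_PLUS = r'\+'                     # simple tokens can be plain regex strings
t_ignore = ' \t'                   # characters skipped silently

def t_NUMBER(t):                   # rules with actions are written as functions
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):                    # called when an illegal character is seen
    print("Illegal character %r" % t.value[0])
    t.lexer.skip(1)

lexer = lex.lex()                  # build the lexer from this module's rules
lexer.input("1 + 2")
for tok in lexer:                  # a PLY lexer is iterable
    print(tok.type, tok.value)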

Example 1: __init__

# Required import: from ply import lex [as alias]
# Or: from ply.lex import lex [as alias]
def __init__(self,lexer=None):
        if lexer is None:
            lexer = lex.lexer
        self.lexer = lexer
        self.macros = { }
        self.path = []
        self.temp_path = []

        # Probe the lexer for selected tokens
        self.lexprobe()

        tm = time.localtime()
        self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
        self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
        self.parser = None

    # -----------------------------------------------------------------------------
    # tokenize()
    #
    # Utility function. Given a string of text, tokenize into a list of tokens
    # ----------------------------------------------------------------------------- 
Developer ID: nojanath, Project: SublimeKSP, Lines of code: 23, Source file: cpp.py
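
For context, the __init__ above belongs to the Preprocessor class in cpp.py, a copy of PLY's bundled ply.cpp module. A hedged sketch of how such a preprocessor is typically driven, assuming the vendored cpp.py mirrors ply.cpp (the input string and file name are illustrative):

import ply.lex as lex
import ply.cpp

# cpp.py defines its token rules at module level, so a lexer can be built
# directly from it and handed to the Preprocessor.
lexer = lex.lex(module=ply.cpp)
p = ply.cpp.Preprocessor(lexer)

p.parse("#define N 5\nint x = N;\n", source="example.c")
while True:
    tok = p.token()            # macro expansion happens lazily, token by token
    if not tok:
        break
    print(tok.value, end="")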

Example 2: init

# Required import: from ply import lex [as alias]
# Or: from ply.lex import lex [as alias]
def init(outputdir=None):
    outputdir = outputdir or os.path.dirname(__file__)  # os.getcwd()
    current_module = sys.modules[__name__]
    #print (outputdir, current_module)
    debug = 0
    optimize = 0
    lexer = lex.lex(optimize=0, debug=debug)

    # lexer.input('on init\n   declare shared parameter cutoff')
    # while True:
    #     tok = lexer.token()
    #     if tok is None:
    #         break
    #     print (tok)

    return yacc.yacc(method="LALR", optimize=optimize, debug=debug,
                     write_tables=0, module=current_module, start='script',
                     outputdir=outputdir, tabmodule='ksp_parser_tab') 
Developer ID: nojanath, Project: SublimeKSP, Lines of code: 20, Source file: ksp_parser.py

Example 3: ppfas

# Required import: from ply import lex [as alias]
# Or: from ply.lex import lex [as alias]
def ppfas(text, subs={}, **subs2):
    """Parse a string of several PrettyPFA expressions (delimited by semicolons) as a list of PFA abstract syntax trees.

    :type text: string
    :param text: PrettyPFA expressions (delimited by semicolons)
    :type subs: dict from substitution names to substitutions
    :param subs: replacement values as PFA titus.pfaast.Ast, PrettyPFA strings, or PFA Pythonized JSON
    :type subs2: dict from substitution names to substitutions
    :param subs2: added to ``subs`` (a more convenient way to pass them)
    :rtype: list of titus.pfaast.Expression
    :return: parsed expressions as PFA
    """

    subs2.update(subs)

    if not exprParser.initialized:
        try:
            import ply.lex as lex
            import ply.yacc as yacc
        except ImportError:
            raise ImportError("ply (used to parse the PrettyPFA) is not available on your system")
        else:
            exprParser.initialize(lex, yacc)

    return exprParser.parse(text, subs2) 
Developer ID: modelop, Project: hadrian, Lines of code: 27, Source file: prettypfa.py
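
As a rough illustration of how ppfas is called (a hedged sketch assuming the titus package is installed; the expressions themselves are purely illustrative):

import titus.prettypfa as prettypfa

# Parse two semicolon-delimited PrettyPFA expressions into PFA ASTs.
exprs = prettypfa.ppfas("x + 1; y * 2")
for expr in exprs:
    print(expr)                # each item is a titus.pfaast.Expression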

Example 4: include

# Required import: from ply import lex [as alias]
# Or: from ply.lex import lex [as alias]
def include(self, filename):
        """ Changes FILENAME and line count
        """
        if filename != STDIN and filename in [x[0] for x in self.filestack]:  # Already included?
            self.warning(' Recursive inclusion')

        self.filestack.append([filename, 1, self.lex, self.input_data])
        self.lex = lex.lex(object=self)
        result = self.put_current_line()  # First #line start with \n (EOL)

        try:
            if filename == STDIN:
                self.input_data = sys.stdin.read()
            else:
                self.input_data = api.utils.read_txt_file(filename)

            if len(self.input_data) and self.input_data[-1] != EOL:
                self.input_data += EOL
        except IOError:
            self.input_data = EOL

        self.lex.input(self.input_data)
        return result 
Developer ID: boriel, Project: zxbasic, Lines of code: 25, Source file: zxbasmpplex.py

Example 5: include_end

# Required import: from ply import lex [as alias]
# Or: from ply.lex import lex [as alias]
def include_end(self):
        """ Performs and end of include.
        """
        self.lex = self.filestack[-1][2]
        self.input_data = self.filestack[-1][3]
        self.filestack.pop()

        if not self.filestack:  # End of input?
            return

        self.filestack[-1][1] += 1  # Increment line counter of previous file

        result = lex.LexToken()  # Creates the token
        result.value = self.put_current_line()
        result.type = '_ENDFILE_'
        result.lineno = self.lex.lineno
        result.lexpos = self.lex.lexpos

        return result 
Developer ID: boriel, Project: zxbasic, Lines of code: 21, Source file: zxbasmpplex.py

Example 6: include_end

# Required import: from ply import lex [as alias]
# Or: from ply.lex import lex [as alias]
def include_end(self):
        """ Performs and end of include.
        """
        self.lex = self.filestack[-1][2]
        self.input_data = self.filestack[-1][3]
        self.filestack.pop()

        if not self.filestack:  # End of input?
            return

        self.filestack[-1][1] += 1  # Increment line counter of previous file

        result = lex.LexToken()
        result.value = self.put_current_line(suffix='\n')
        result.type = '_ENDFILE_'
        result.lineno = self.lex.lineno
        result.lexpos = self.lex.lexpos

        return result 
Developer ID: boriel, Project: zxbasic, Lines of code: 21, Source file: zxbpplex.py

Example 7: __init__

# Required import: from ply import lex [as alias]
# Or: from ply.lex import lex [as alias]
def __init__(self):
        """ Creates a new GLOBAL lexer instance
        """
        self.lex = None
        self.filestack = []  # Current filename, and line number being parsed
        self.input_data = ''
        self.tokens = tokens
        self.states = states
        self.next_token = None  # if set to something, this will be returned once
        self.expectingDirective = False  # True if the lexer expects a preprocessor directive
        self.__COMMENT_LEVEL = 0

# --------------------- PREPROCESSOR FUNCTIONS -------------------


# Needed for states 
Developer ID: boriel, Project: zxbasic, Lines of code: 18, Source file: zxbpplex.py

Example 8: group_lines

# Required import: from ply import lex [as alias]
# Or: from ply.lex import lex [as alias]
def group_lines(self,input):
        lex = self.lexer.clone()
        lines = [x.rstrip() for x in input.splitlines()]
        for i in xrange(len(lines)):
            j = i+1
            while lines[i].endswith('\\') and (j < len(lines)):
                lines[i] = lines[i][:-1]+lines[j]
                lines[j] = ""
                j += 1

        input = "\n".join(lines)
        lex.input(input)
        lex.lineno = 1

        current_line = []
        while True:
            tok = lex.token()
            if not tok:
                break
            current_line.append(tok)
            if tok.type in self.t_WS and '\n' in tok.value:
                yield current_line
                current_line = []

        if current_line:
            yield current_line

    # ----------------------------------------------------------------------
    # tokenstrip()
    # 
    # Remove leading/trailing whitespace tokens from a token list
    # ---------------------------------------------------------------------- 
Developer ID: nojanath, Project: SublimeKSP, Lines of code: 34, Source file: cpp.py

Example 9: t_LINECONT

# Required import: from ply import lex [as alias]
# Or: from ply.lex import lex [as alias]
def t_LINECONT(t):
    r'\.\.\.[ \t]*\n'
    t.lexer.lineno += 1
    pass

##lex.lex()

# *********************************** PARSER ******************************************* 
Developer ID: nojanath, Project: SublimeKSP, Lines of code: 10, Source file: ksp_parser.py

Example 10: ast

# Required import: from ply import lex [as alias]
# Or: from ply.lex import lex [as alias]
def ast(text, check=True, version=None, subs={}, **subs2):
    """Parse PrettyPFA and return the result as a PFA abstract syntax tree.

    :type text: string
    :param text: PrettyPFA to parse
    :type check: bool
    :param check: if ``True``, check the result for PFA semantic errors (default ``True``); **Note:** if the PrettyPFA contains any unresolved substitutions (in ``<<French quotes>>``), it will **not** be checked
    :type version: string or ``None``
    :param version: version of the PFA language to use while interpreting (``None`` defaults to titus.version.defaultPFAVersion)
    :type subs: dict from substitution names to substitutions
    :param subs: replacement values as PFA titus.pfaast.Ast, PrettyPFA strings, or PFA Pythonized JSON
    :type subs2: dict from substitution names to substitutions
    :param subs2: added to ``subs`` (a more convenient way to pass them)
    :rtype: titus.pfaast.EngineConfig
    :return: PFA abstract syntax tree
    """

    subs2.update(subs)

    if not parser.initialized:
        try:
            import ply.lex as lex
            import ply.yacc as yacc
        except ImportError:
            raise ImportError("ply (used to parse the PrettyPFA) is not available on your system")
        else:
            parser.initialize(lex, yacc)

    out = parser.parse(text, subs2)

    anysubs = lambda x: x
    anysubs.isDefinedAt = lambda x: isinstance(x, Subs)

    if check and len(out.collect(anysubs)) == 0:
        PFAEngine.fromAst(out, version=version)
    return out 
Developer ID: modelop, Project: hadrian, Lines of code: 38, Source file: prettypfa.py
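
In contrast to ppfas in Example 3, ast parses a complete PrettyPFA document rather than bare expressions. A hedged sketch, again assuming titus is installed (the document below is illustrative):

import titus.prettypfa as prettypfa

# Parse a whole PrettyPFA document into a titus.pfaast.EngineConfig and
# check it for PFA semantic errors (check=True is the default).
engineConfig = prettypfa.ast('''
input: double
output: double
action: input + 10
''')
print(engineConfig)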

Example 11: __init__

# Required import: from ply import lex [as alias]
# Or: from ply.lex import lex [as alias]
def __init__(self, lexer_object=None, lookup={}):
        if self.mode == "ply":
            self._lookup = lookup
            self._lexer = lex.lex(object=lexer_object if lexer_object else CircuitLexer())
            self._parser = yacc.yacc(module=self, start="ppstring", debug=False,
                                     tabmodule='pygsti.io.parsetab_string')
            self.parse = self.ply_parse
        else:
            self.parse = self._parse 
Developer ID: pyGSTio, Project: pyGSTi, Lines of code: 11, Source file: __init__.py

Example 12: group_lines

# Required import: from ply import lex [as alias]
# Or: from ply.lex import lex [as alias]
def group_lines(self,input):
        lex = self.lexer.clone()
        lines = [x.rstrip() for x in input.splitlines()]
        for i in xrange(len(lines)):
            j = i+1
            while lines[i].endswith('\\') and (j < len(lines)):
                lines[i] = lines[i][:-1]+lines[j]
                lines[j] = ""
                j += 1

        input = "\n".join(lines)
        lex.input(input)
        lex.lineno = 1

        current_line = []
        while True:
            tok = lex.token()
            if not tok:
                break
            current_line.append(tok)
            if tok.type in self.t_WS and '\n' in tok.value:
                yield current_line
                current_line = []

        if current_line:
            yield current_line

    # ----------------------------------------------------------------------
    # tokenstrip()
    #
    # Remove leading/trailing whitespace tokens from a token list
    # ---------------------------------------------------------------------- 
Developer ID: remg427, Project: misp42splunk, Lines of code: 34, Source file: cpp.py

Example 13: _build

# Required import: from ply import lex [as alias]
# Or: from ply.lex import lex [as alias]
def _build(self, **kwargs):

        self._lexer = lex.lex(module=self, **kwargs)
        self._parser = yacc.yacc(module=self, **kwargs)

    ############################################## 
Developer ID: FabriceSalvaire, Project: PySpice, Lines of code: 8, Source file: Parser.py
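
The module=self idiom in _build is the standard PLY pattern for class-based grammars: token definitions, lexer rules, and grammar rules all live on one object, and both lex.lex and yacc.yacc are pointed at it. A minimal self-contained sketch of that pattern (the class and rule names are illustrative, not PySpice's):

import ply.lex as lex
import ply.yacc as yacc

class CalcParser:

    tokens = ('NUMBER', 'PLUS')
    precedence = (('left', 'PLUS'),)   # avoid a shift/reduce conflict

    t_PLUS = r'\+'
    t_ignore = ' \t'

    def t_NUMBER(self, t):
        r'\d+'
        t.value = int(t.value)
        return t

    def t_error(self, t):
        t.lexer.skip(1)

    def p_expr_plus(self, p):
        'expr : expr PLUS expr'
        p[0] = p[1] + p[3]

    def p_expr_number(self, p):
        'expr : NUMBER'
        p[0] = p[1]

    def p_error(self, p):
        raise SyntaxError('syntax error at %r' % (p,))

    def __init__(self):
        # Same shape as _build above: lexer and parser are both built from self.
        self._lexer = lex.lex(module=self)
        self._parser = yacc.yacc(module=self, write_tables=False, debug=False)

    def parse(self, text):
        return self._parser.parse(text, lexer=self._lexer)

print(CalcParser().parse('1 + 2 + 3'))   # -> 6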

Example 14: __init__

# Required import: from ply import lex [as alias]
# Or: from ply.lex import lex [as alias]
def __init__(self, **kwargs) -> None:
        """Creates inner lexer."""
        self._lexer = lex.lex(module=self, **kwargs)
        self.reset() 
Developer ID: wemake-services, Project: dotenv-linter, Lines of code: 6, Source file: lexer.py

Example 15: token

# Required import: from ply import lex [as alias]
# Or: from ply.lex import lex [as alias]
def token(self) -> lex.LexToken:
        """
        Returns the next token to work with.

        Should not be called directly, since it is a part of ``ply`` API.
        """
        return self._lexer.token() 
Developer ID: wemake-services, Project: dotenv-linter, Lines of code: 9, Source file: lexer.py
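
Examples 14 and 15 together form a small wrapper class: __init__ builds the inner PLY lexer from the object's own rules, and token() forwards to it as part of the ply lexer protocol. A hedged, self-contained sketch of that shape (the token rules are illustrative, not dotenv-linter's, whose real lexer also handles state resetting):

import ply.lex as lex

class WrapperLexer:

    tokens = ('WORD',)
    t_WORD = r'\S+'
    t_ignore = ' \t\n'

    def t_error(self, t):
        t.lexer.skip(1)

    def __init__(self, **kwargs) -> None:
        self._lexer = lex.lex(module=self, **kwargs)

    def input(self, text: str) -> None:
        self._lexer.input(text)

    def token(self):
        # Part of the ply lexer protocol: next token, or None at end of input.
        return self._lexer.token()

lexer = WrapperLexer()
lexer.input('KEY=value OTHER=1')
while True:
    tok = lexer.token()
    if tok is None:
        break
    print(tok.type, tok.value)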


Note: the ply.lex.lex method examples in this article were compiled by 純淨天空 from open source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce without permission.