

Python OneOrMore.setParseAction Method Code Examples

This article collects typical usage examples of the pyparsing.OneOrMore.setParseAction method in Python. If you have been wondering how OneOrMore.setParseAction is used in practice, or are looking for concrete examples of it, the curated code samples below should help. You can also explore further usage examples of the class it belongs to, pyparsing.OneOrMore.


The following presents 15 code examples of the OneOrMore.setParseAction method, sorted by popularity by default.
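
Before the project-specific examples, here is a minimal self-contained sketch (not taken from any of the projects below) of what setParseAction does when attached to a OneOrMore expression: the parse action receives the matched tokens, and its return value replaces them in the parse results.

from pyparsing import OneOrMore, Word, nums

numbers = OneOrMore(Word(nums))
numbers.setParseAction(lambda tokens: sum(int(t) for t in tokens))

print(numbers.parseString("1 2 3")[0])  # 6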

Example 1: init_parser

# Required module import: from pyparsing import OneOrMore [as alias]
# Alternatively: from pyparsing.OneOrMore import setParseAction [as alias]
    def init_parser(self):

        INTEGER = Word(nums)
        INTEGER.setParseAction(lambda x: int(x[0]))

        header = INTEGER("species_count") + INTEGER("sequence_length") +\
            Suppress(restOfLine)
        header.setParseAction(self.set_header)

        sequence_name = Word(
            alphas + nums + "!#$%&\'*+-./;<=>?@[\\]^_`{|}~",
            max=100)

        # Take a copy and disallow line breaks in the bases
        bases = self.BASES.copy()
        bases.setWhitespaceChars(" \t")
        seq_start = sequence_name("species") + bases(
            "sequence") + Suppress(LineEnd())
        seq_start.setParseAction(self.set_seq_start)
        seq_start_block = OneOrMore(seq_start)
        seq_start_block.setParseAction(self.set_start_block)

        seq_continue = bases("sequence") + Suppress(LineEnd())
        seq_continue.setParseAction(self.set_seq_continue)

        seq_continue_block = Suppress(LineEnd()) + OneOrMore(seq_continue)
        seq_continue_block.setParseAction(self.set_continue_block)

        return header + seq_start_block + ZeroOrMore(seq_continue_block)
Author: brettc, Project: tigger, Lines: 31, Source: alignment.py
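
The copy()/setWhitespaceChars() trick above is what keeps the bases token from skipping over line breaks, so each alignment row has to end on its own line. A minimal self-contained sketch of the same idea (BASES and the sample data are illustrative stand-ins, not taken from alignment.py):

from pyparsing import LineEnd, OneOrMore, Suppress, Word, alphas

BASES = Word("ACGTUNacgtun-?")      # stand-in for self.BASES
bases = BASES.copy()
bases.setWhitespaceChars(" \t")     # the copy no longer skips newlines
row = Word(alphas)("species") + bases("sequence") + Suppress(LineEnd())

print(OneOrMore(row).parseString("human ACGTACGT\nmouse ACG-ACGT\n"))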

Example 2: grammar

# Required module import: from pyparsing import OneOrMore [as alias]
# Alternatively: from pyparsing.OneOrMore import setParseAction [as alias]
def grammar():
    parenthesis = Forward()
    parenthesis <<= "(" + ZeroOrMore(CharsNotIn("()") | parenthesis) + ")"

    field_def = OneOrMore(Word(alphanums + "_\"'`:-") | parenthesis)
    field_def.setParseAction(field_act)

    tablename_def = ( Word(alphas + "`_") | QuotedString("\"") )

    field_list_def = field_def + ZeroOrMore(Suppress(",") + field_def)
    field_list_def.setParseAction(field_list_act)

    create_table_def = Literal("CREATE") + "TABLE" + tablename_def.setResultsName("tableName") + "(" + field_list_def.setResultsName("fields") + ")" + ";"
    create_table_def.setParseAction(create_table_act)

    add_fkey_def = Literal("ALTER") + "TABLE" + "ONLY" + tablename_def.setResultsName("tableName") + "ADD" + "CONSTRAINT" + Word(alphanums + "_") + "FOREIGN" + "KEY" + "(" + Word(alphanums + "_").setResultsName("keyName") + ")" + "REFERENCES" + Word(alphanums + "_").setResultsName("fkTable") + "(" + Word(alphanums + "_").setResultsName("fkCol") + ")" + ";"
    add_fkey_def.setParseAction(add_fkey_act)

    other_statement_def = OneOrMore(CharsNotIn(";")) + ";"
    other_statement_def.setParseAction(other_statement_act)

    comment_def = "--" + ZeroOrMore(CharsNotIn("\n"))
    comment_def.setParseAction(other_statement_act)

    return OneOrMore(comment_def | create_table_def | add_fkey_def | other_statement_def)
Author: LukeShu, Project: sql_graphviz, Lines: 27, Source: sql_graphviz.py
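
A hedged usage sketch: the returned parser is run over a whole SQL dump, and the *_act callbacks (defined elsewhere in sql_graphviz.py) turn each matched statement into Graphviz output. The sample statements here are illustrative only:

sql_dump = """
CREATE TABLE users (id integer, name varchar(80));
ALTER TABLE ONLY orders ADD CONSTRAINT orders_user_fk FOREIGN KEY (user_id) REFERENCES users (id);
-- comments are matched and handed to other_statement_act
"""
for token in grammar().parseString(sql_dump, parseAll=True):
    print(token)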

Example 3: make_grammar

# Required module import: from pyparsing import OneOrMore [as alias]
# Alternatively: from pyparsing.OneOrMore import setParseAction [as alias]
def make_grammar():
    """Creates the grammar to be used by a spec matcher."""
    # This is apparently how pyparsing recommends to be used,
    # as http://pyparsing.wikispaces.com/share/view/644825 states that
    # it is not thread-safe to use a parser across threads.

    unary_ops = (
        # Order matters here (so that '=' doesn't match before '==')
        Literal("==") | Literal("=") |
        Literal("!=") | Literal("<in>") |
        Literal(">=") | Literal("<=") |
        Literal("s==") | Literal("s!=") |
        # Order matters here (so that '<' doesn't match before '<=')
        Literal("s<=") | Literal("s<") |
        # Order matters here (so that '>' doesn't match before '>=')
        Literal("s>=") | Literal("s>"))

    or_ = Literal("<or>")

    # An atom is anything that is not a keyword, followed by anything but whitespace
    atom = ~(unary_ops | or_) + Regex(r"\S+")

    unary = unary_ops + atom
    disjunction = OneOrMore(or_ + atom)

    # Even-numbered tokens will be '<or>', so we drop them
    disjunction.setParseAction(lambda _s, _l, t: ["<or>"] + t[1::2])

    expr = disjunction | unary | atom
    return expr
Author: singh264, Project: ugradproject, Lines: 32, Source: specs_matcher.py
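
Since the only parse action here is the one that rewrites the '<or>' disjunction, the grammar's raw output is easy to demonstrate:

spec = make_grammar()
print(list(spec.parseString(">= 4")))           # ['>=', '4']
print(list(spec.parseString("<or> 4 <or> 8")))  # ['<or>', '4', '8']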

Example 4: parse

# Required module import: from pyparsing import OneOrMore [as alias]
# Alternatively: from pyparsing.OneOrMore import setParseAction [as alias]
    def parse(self, what):
        UNICODE_PUNCT_FINAL=ur'\u00BB\u2019\u201D\u203A\u2E03\u2E05\u2E0A\u2E0D\u2E1D\u2E21'
        UNICODE_PUNCT_INITIAL=ur'\u00AB\u2018\u201B\u201C\u201F\u2039\u2E02\u2E04\u2E09\u2E0C\u2E1C\u2E20'
        WORD_CHAR_RE = r'[\w.,\'\"\(\)</>%s%s-]'%(UNICODE_PUNCT_INITIAL, UNICODE_PUNCT_FINAL)
        CITEKEY_RE = r'\w[\w\(:.#\$%&+?<>~/\)-]+'
        greedyToken = Regex(r'%s+'%(WORD_CHAR_RE))
        wordWithDigits = Regex(r'%s*[0-9]%s*'%(WORD_CHAR_RE, WORD_CHAR_RE))

        # translate embedded emph & strong RST to HTML
        emText = '*' + OneOrMore(greedyToken) + '*'
        emText.setParseAction(lambda s,l,t:
                                  "<i>%s</i>"%(" ".join(t[1:-1])))
        strongText = '**' + OneOrMore(greedyToken) + '**'
        strongText.setParseAction(lambda s,l,t: 
                                  "<b>%s</b>"%(" ".join(t[1:-1])))

        text = strongText | emText | greedyToken

        locator = (Optional(',') + OneOrMore(wordWithDigits)) ^ (Optional(',') + Optional(greedyToken) + OneOrMore(wordWithDigits))

        def locator_parse_action(s, l, t):
            raw = " ".join(t)
            # strip leading comma
            return CiteParser.Locator(re.sub('^,\s+', '', raw))
        locator.setParseAction(locator_parse_action)

        citeKey = Optional('-') + '@' + Regex(CITEKEY_RE)
        citeKey.setParseAction(lambda s,l,t: CiteParser.CiteKey(t))

        # suffix comes after a cite
        suffix = OneOrMore(text)
        suffix.setParseAction(lambda s,l,t: CiteParser.Suffix(" ".join(t)))

        # prefix comes before a cite
        prefix = OneOrMore(text)
        prefix.setParseAction(lambda s,l,t: CiteParser.Prefix(" ".join(t)))

        # a short cite, author + (date)
        shortCite = Optional('-') + '@' + Regex(CITEKEY_RE)
        shortCite.setParseAction(lambda s,l,t: CiteParser.ShortCite(t))

        # a full & complete cite (for use in brackets)
        fullCite = (citeKey | (prefix + citeKey)) + Optional(locator) + Optional(suffix)
        fullCite.setParseAction(lambda s,l,t: CiteParser.FullCite(t))

        restCite = ';' + fullCite

        bracketedCite = ('[' + fullCite + ZeroOrMore(restCite) + ']')

        shortCiteExtra = ('[' + locator + Optional(suffix) + ZeroOrMore(restCite) + ']')
        shortCiteExtra.setParseAction(lambda s,l,t: CiteParser.ShortCiteExtra(t))

        topCite = bracketedCite ^ shortCite + shortCiteExtra ^ shortCite + bracketedCite ^ shortCite

        try:
            raw = topCite.parseString(what, True)
            return self._results2cites(list(raw))
        except ParseException:
            raise Exception('The citation %s was not parseable.'%(what))
Author: egh, Project: zot4rst, Lines: 61, Source: parser.py
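
The emphasis/strong translation above can be isolated into a tiny standalone sketch (the token pattern is simplified and the CiteParser wrappers are left out):

from pyparsing import OneOrMore, Regex

word = Regex(r"[\w.,'\"()-]+")
em = '*' + OneOrMore(word) + '*'
em.setParseAction(lambda s, l, t: "<i>%s</i>" % " ".join(t[1:-1]))

print(em.parseString("*a short emphasised phrase*"))  # ['<i>a short emphasised phrase</i>']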

Example 5: expression

# Required module import: from pyparsing import OneOrMore [as alias]
# Alternatively: from pyparsing.OneOrMore import setParseAction [as alias]
def expression():
    """
    """
    def transformer(string, location, tokens):
        return tokens.asList()
    token = OneOrMore(term())
    token.setName("expression")
    token.setParseAction(transformer)
    return token
Author: extesla, Project: dice-python, Lines: 11, Source: grammar.py

Example 6: get_highlight_expression

# Required module import: from pyparsing import OneOrMore [as alias]
# Alternatively: from pyparsing.OneOrMore import setParseAction [as alias]
def get_highlight_expression():
    field_expression = Word(srange("[a-zA-Z0-9_.*]"))
    field_expression.setParseAction(parse_highlight_field_expression)
    fields_expression = OneOrMore(
        field_expression + Optional(',').suppress())
    fields_expression.setParseAction(parse_highlight_expression)
    highlight_expression = Word('highlight:').suppress() \
        + Word('[').suppress() \
        + fields_expression + Word(']').suppress()
    return highlight_expression
Author: Aplopio, Project: plasticparser, Lines: 12, Source: tokenizer.py
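
A hedged usage sketch; the exact shape of the result depends on the parse_highlight_* actions defined elsewhere in tokenizer.py:

highlight = get_highlight_expression()
print(highlight.parseString("highlight:[title, description.raw]"))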

Example 7: parse

# Required module import: from pyparsing import OneOrMore [as alias]
# Alternatively: from pyparsing.OneOrMore import setParseAction [as alias]
    def parse(self, what):
        WORD_CHAR_RE = r'[\w.,\'\"\(\)</>-]'
        
        greedyToken = Regex(r'%s+'%(WORD_CHAR_RE))
        wordWithDigits = Regex(r'%s*[0-9]%s*'%(WORD_CHAR_RE, WORD_CHAR_RE))

        # translate embedded emph & strong RST to HTML
        emText = '*' + OneOrMore(greedyToken) + '*'
        emText.setParseAction(lambda s,l,t:
                                  "<i>%s</i>"%(" ".join(t[1:-1])))
        strongText = '**' + OneOrMore(greedyToken) + '**'
        strongText.setParseAction(lambda s,l,t: 
                                  "<b>%s</b>"%(" ".join(t[1:-1])))

        text = strongText | emText | greedyToken

        locator = (Optional(',') + OneOrMore(wordWithDigits)) ^ (Optional(',') + Optional(greedyToken) + OneOrMore(wordWithDigits))

        def locator_parse_action(s, l, t):
            raw = " ".join(t)
            # strip leading comma
            return CiteParser.Locator(re.sub('^,\s+', '', raw))
        locator.setParseAction(locator_parse_action)

        citeKey = Optional('-') + '@' + Regex(r'[\w-]+')
        citeKey.setParseAction(lambda s,l,t: CiteParser.CiteKey(t))

        # suffix comes after a cite
        suffix = OneOrMore(text)
        suffix.setParseAction(lambda s,l,t: CiteParser.Suffix(" ".join(t)))

        # prefix comes before a cite
        prefix = OneOrMore(text)
        prefix.setParseAction(lambda s,l,t: CiteParser.Prefix(" ".join(t)))

        # a short cite, author + (date)
        shortCite = Optional('-') + '@' + Regex(r'[\w-]+')
        shortCite.setParseAction(lambda s,l,t: CiteParser.ShortCite(t))

        # a full & complete cite (for use in brackets)
        fullCite = (citeKey | (prefix + citeKey)) + Optional(locator) + Optional(suffix)
        fullCite.setParseAction(lambda s,l,t: CiteParser.FullCite(t))

        restCite = ';' + fullCite

        bracketedCite = ('[' + fullCite + ZeroOrMore(restCite) + ']')

        shortCiteExtra = ('[' + locator + Optional(suffix) + ZeroOrMore(restCite) + ']')
        shortCiteExtra.setParseAction(lambda s,l,t: CiteParser.ShortCiteExtra(t))

        topCite = bracketedCite ^ shortCite + shortCiteExtra ^ shortCite + bracketedCite ^ shortCite

        raw = topCite.parseString(what, True)
        return self._results2cites(list(raw))
Author: howthebodyworks, Project: zot4rst, Lines: 56, Source: parser.py

Example 8: get_facet_expression

# Required module import: from pyparsing import OneOrMore [as alias]
# Alternatively: from pyparsing.OneOrMore import setParseAction [as alias]
def get_facet_expression():
    facet_logical_expression = get_nested_logical_expression()
    single_facet_expression = Word(
        srange("[a-zA-Z0-9_.]")) +\
        Optional(
            Word('(').suppress() +
            OneOrMore(facet_logical_expression).setParseAction(parse_one_or_more_facets_expression) +
            Word(')').suppress())
    single_facet_expression.setParseAction(parse_single_facet_expression)
    base_facets_expression = OneOrMore(single_facet_expression
                                       + Optional(',').suppress())
    base_facets_expression.setParseAction(parse_base_facets_expression)
    facets_expression = Word('facets:').suppress() \
        + Word('[').suppress() \
        + base_facets_expression + Word(']').suppress()
    return facets_expression
Author: vivekchand, Project: plasticparser, Lines: 18, Source: tokenizer.py

Example 9: get_aggregations_expression

# Required module import: from pyparsing import OneOrMore [as alias]
# Alternatively: from pyparsing.OneOrMore import setParseAction [as alias]
def get_aggregations_expression():
    aggs_logical_expression = get_nested_logical_expression()
    single_aggs_expression = Word(
        srange("[a-zA-Z0-9_.]")) +\
        Optional(
            Word('(').suppress() +
            OneOrMore(aggs_logical_expression).setParseAction(
                parse_one_or_more_aggs_expression) +
            Word(')').suppress())
    single_aggs_expression.setParseAction(parse_single_aggs_expression)
    base_aggs_expression = OneOrMore(single_aggs_expression
                                       + Optional(',').suppress())
    base_aggs_expression.setParseAction(parse_base_aggs_expression)
    aggs_expression = Word('aggregations:').suppress() \
        + Word('[').suppress() \
        + base_aggs_expression + Word(']').suppress()
    return aggs_expression
Author: Aplopio, Project: plasticparser, Lines: 19, Source: tokenizer.py

Example 10: define_dot_parser

# Required module import: from pyparsing import OneOrMore [as alias]
# Alternatively: from pyparsing.OneOrMore import setParseAction [as alias]
    def define_dot_parser(self):
        """Define dot grammar

        Based on the grammar http://www.graphviz.org/doc/info/lang.html
        """
        # punctuation
        colon  = Literal(":")
        lbrace = Suppress("{")
        rbrace = Suppress("}")
        lbrack = Suppress("[")
        rbrack = Suppress("]")
        lparen = Literal("(")
        rparen = Literal(")")
        equals = Suppress("=")
        comma  = Literal(",")
        dot    = Literal(".")
        slash  = Literal("/")
        bslash = Literal("\\")
        star   = Literal("*")
        semi   = Suppress(";")
        at     = Literal("@")
        minus  = Literal("-")
        pluss  = Suppress("+")

        # keywords
        strict_    = CaselessLiteral("strict")
        graph_     = CaselessLiteral("graph")
        digraph_   = CaselessLiteral("digraph")
        subgraph_  = CaselessLiteral("subgraph")
        node_      = CaselessLiteral("node")
        edge_      = CaselessLiteral("edge")

        punctuation_ = "".join( [ c for c in string.punctuation if c not in '_' ] ) +string.whitespace
        # token definitions

        identifier = Word(alphanums + "_" ).setName("identifier")

        #double_quoted_string = QuotedString('"', multiline=True,escChar='\\',
        #    unquoteResults=True) # dblQuotedString
        double_quoted_string = Regex(r'\"(?:\\\"|\\\\|[^"])*\"', re.MULTILINE)
        double_quoted_string.setParseAction(removeQuotes)
        quoted_string = Combine(double_quoted_string+
            Optional(OneOrMore(pluss+double_quoted_string)),adjacent=False)
        alphastring_ = OneOrMore(CharsNotIn(punctuation_))

        def parse_html(s, loc, toks):
            return '<<%s>>' % ''.join(toks[0])


        opener = '<'
        closer = '>'
        try:
            html_text = pyparsing.nestedExpr( opener, closer,
                (( CharsNotIn(
                    opener + closer ).setParseAction( lambda t:t[0] ))
                )).setParseAction(parse_html)
        except:
            log.debug('nestedExpr not available.')
            log.warning('Old version of pyparsing detected. Version 1.4.8 or '
                        'later is recommended. Parsing of html labels may not '
                        'work properly.')
            html_text = Combine(Literal("<<") + OneOrMore(CharsNotIn(",]")))


        ID = ( alphastring_ | html_text |
            quoted_string | #.setParseAction(strip_quotes) |
            identifier ).setName("ID")


        float_number = Combine(Optional(minus) +
            OneOrMore(Word(nums + "."))).setName("float_number")

        righthand_id =  (float_number | ID ).setName("righthand_id")

        port_angle = (at + ID).setName("port_angle")

        port_location = ((OneOrMore(Group(colon + ID)) |
            Group(colon + lparen + ID + comma + ID + rparen))).setName("port_location")

        port = Combine((Group(port_location + Optional(port_angle)) |
            Group(port_angle + Optional(port_location)))).setName("port")

        node_id = (ID + Optional(port))
        a_list = OneOrMore(ID + Optional(equals + righthand_id) +
            Optional(comma.suppress())).setName("a_list")

        attr_list = OneOrMore(lbrack + Optional(a_list) +
            rbrack).setName("attr_list").setResultsName('attrlist')

        attr_stmt = ((graph_ | node_ | edge_) + attr_list).setName("attr_stmt")

        edgeop = (Literal("--") | Literal("->")).setName("edgeop")

        stmt_list = Forward()
        graph_stmt = (lbrace + Optional(stmt_list) +
            rbrace + Optional(semi) ).setName("graph_stmt")


        edge_point = Forward()

#......... part of the code omitted here .........
Author: adamdoupe, Project: dissertation, Lines: 103, Source: dotparsing.py

Example 11: Suppress

# Required module import: from pyparsing import OneOrMore [as alias]
# Alternatively: from pyparsing.OneOrMore import setParseAction [as alias]
AT = Suppress('@')
EQUALS = Suppress('=')

field_val = Word(nums) | QuotedString('{', endQuoteChar='}', multiline=True,
                                      convertWhitespaceEscapes=False)
title_field = Group(kwd('title') + EQUALS + field_val)
journal_field = Group(kwd('journal') + EQUALS + field_val)
year_field = Group(kwd('year') + EQUALS + field_val)
volume_field = Group(kwd('volume') + EQUALS + field_val)
pages_field = Group(kwd('pages') + EQUALS + field_val)
abstract_field = Group(kwd('abstract') + EQUALS + field_val)
doi_field = Group(kwd('doi') + EQUALS + field_val)
other_field = Group(Word(alphanums) + EQUALS + field_val)

author = OneOrMore(~kwd('and') + Word(alphas + alphas8bit + '.,-'))
author.setParseAction(lambda xx: ' '.join(str(x) for x in xx))
author_list = LCURLY + delimitedList(author, 'and') + RCURLY
author_field = Group(kwd('author') + EQUALS + Group(author_list))

entry_item = (title_field | author_field | journal_field | year_field
              | volume_field | pages_field | abstract_field | doi_field
              | Suppress(other_field))


class BibEntry(object):
    def __init__(self, type, cite_key, fields):
        self.type = type
        self.cite_key = cite_key
        self.fields = fields
        self.__dict__.update(**fields)
Author: Eigenstate, Project: msmbuilder, Lines: 32, Source: bibparse.py
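
The author grammar joins the word tokens of each name and splits the name list on 'and'. A standalone check of just that piece, assuming kwd is CaselessKeyword and LCURLY/RCURLY are suppressed braces (both are defined earlier in bibparse.py):

from pyparsing import (CaselessKeyword, OneOrMore, Suppress, Word,
                       alphas, alphas8bit, delimitedList)

kwd = CaselessKeyword                 # assumed stand-in for bibparse's helper
LCURLY, RCURLY = Suppress('{'), Suppress('}')

author = OneOrMore(~kwd('and') + Word(alphas + alphas8bit + '.,-'))
author.setParseAction(lambda xx: ' '.join(str(x) for x in xx))
author_list = LCURLY + delimitedList(author, 'and') + RCURLY

print(list(author_list.parseString("{Knuth, D. E. and Lamport, L.}")))
# ['Knuth, D. E.', 'Lamport, L.']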

Example 12: graph_definition

# Required module import: from pyparsing import OneOrMore [as alias]
# Alternatively: from pyparsing.OneOrMore import setParseAction [as alias]
def graph_definition():
	global graphparser
	
	if not graphparser:
		# punctuation
		colon  = Literal(":")
		lbrace = Literal("{")
		rbrace = Literal("}")
		lbrack = Literal("[")
		rbrack = Literal("]")
		lparen = Literal("(")
		rparen = Literal(")")
		equals = Literal("=")
		comma  = Literal(",")
		dot    = Literal(".")
		slash  = Literal("/")
		bslash = Literal("\\")
		star   = Literal("*")
		semi   = Literal(";")
		at     = Literal("@")
		minus  = Literal("-")
		
		# keywords
		strict_    = Literal("strict")
		graph_     = Literal("graph")
		digraph_   = Literal("digraph")
		subgraph_  = Literal("subgraph")
		node_      = Literal("node")
		edge_      = Literal("edge")

		
		# token definitions
		
		identifier = Word(alphanums + "_" ).setName("identifier")
		
 		double_quoted_string = dblQuotedString

		alphastring_ = OneOrMore(CharsNotIn(_noncomma))

		ID = (identifier | double_quoted_string.setParseAction(strip_quotes) |\
			alphastring_).setName("ID")
			
		html_text = Combine(Literal("<<") + OneOrMore(CharsNotIn(",]")))
		
		float_number = Combine(Optional(minus) +	\
			OneOrMore(Word(nums + "."))).setName("float_number")
			
		righthand_id =  (float_number | ID | html_text).setName("righthand_id")

		port_angle = (at + ID).setName("port_angle")
		
		port_location = (Group(colon + ID) |	\
			Group(colon + lparen + ID + comma + ID + rparen)).setName("port_location")
			
		port = (Group(port_location + Optional(port_angle)) |	\
			Group(port_angle + Optional(port_location))).setName("port")
			
		node_id = (ID + Optional(port))
		a_list = OneOrMore(ID + Optional(equals.suppress() + righthand_id) +	\
			Optional(comma.suppress())).setName("a_list")
			
		attr_list = OneOrMore(lbrack.suppress() + Optional(a_list) +	\
			rbrack.suppress()).setName("attr_list")
			
		attr_stmt = (Group(graph_ | node_ | edge_) + attr_list).setName("attr_stmt")

		edgeop = (Literal("--") | Literal("->")).setName("edgeop")

		stmt_list = Forward()
		graph_stmt = Group(lbrace.suppress() + Optional(stmt_list) +	\
			rbrace.suppress()).setName("graph_stmt")
			
		subgraph = (Group(Optional(subgraph_ + Optional(ID)) + graph_stmt) |	\
			Group(subgraph_ + ID)).setName("subgraph")
			
		edgeRHS = OneOrMore(edgeop + Group(node_id | subgraph))
		
		edge_stmt = Group(node_id | subgraph) + edgeRHS + Optional(attr_list)

		node_stmt = (node_id + Optional(attr_list) + Optional(semi.suppress())).setName("node_stmt")
		
		assignment = (ID + equals.suppress() + righthand_id).setName("assignment")
		stmt =  (assignment | edge_stmt | attr_stmt | subgraph | node_stmt).setName("stmt")
		stmt_list << OneOrMore(stmt + Optional(semi.suppress()))

		graphparser = (Optional(strict_) + Group((graph_ | digraph_)) +	\
			Optional(ID) + graph_stmt).setResultsName("graph")

		singleLineComment = "//" + restOfLine
		
		
		# actions
		
		graphparser.ignore(singleLineComment)
		graphparser.ignore(cStyleComment)

		assignment.setParseAction(push_attr_list)
		a_list.setParseAction(push_attr_list)
		edge_stmt.setParseAction(push_edge_stmt)
		node_stmt.setParseAction(push_node_stmt)
#......... part of the code omitted here .........
Author: hristozov, Project: Yogi, Lines: 103, Source: dot_parser.py

Example 13: _build_asn1_grammar

# Required module import: from pyparsing import OneOrMore [as alias]
# Alternatively: from pyparsing.OneOrMore import setParseAction [as alias]

#......... part of the code omitted here .........
    restricted_integer_type = INTEGER + braced_list(named_number)
    boolean_type = BOOLEAN
    real_type = REAL
    null_type = NULL
    object_identifier_type = OBJECT_IDENTIFIER
    octetstring_type = OCTET_STRING
    unrestricted_characterstring_type = CHARACTER_STRING
    restricted_characterstring_type = BMPString | GeneralString | \
                                      GraphicString | IA5String | \
                                      ISO646String | NumericString | \
                                      PrintableString | TeletexString | \
                                      T61String | UniversalString | \
                                      UTF8String | VideotexString | VisibleString
    characterstring_type = restricted_characterstring_type | unrestricted_characterstring_type
    useful_type = GeneralizedTime | UTCTime | ObjectDescriptor

    # todo: consider other builtins from 16.2
    simple_type = (boolean_type | null_type | octetstring_type | characterstring_type | real_type | plain_integer_type | object_identifier_type | useful_type) + Optional(constraint)
    constructed_type = choice_type | sequence_type | set_type
    value_list_type = restricted_integer_type | enumerated_type
    builtin_type = value_list_type | tagged_type | simple_type | constructed_type | sequenceof_type | setof_type | bitstring_type

    type_ << (builtin_type | referenced_type)

    # EXT: identifier should not be Optional here, but
    # our other ASN.1 code generator supports unnamed members,
    # and we use them.
    named_type << (Optional(identifier) + type_)

    # EXT: Trailing semi-colon is not allowed by standard
    # grammar, but our other ASN.1 code generator accepts it
    # and we happen to use it.
    type_assignment = typereference + '::=' + type_ + Suppress(Optional(';'))
    value_assignment = valuereference + type_ + '::=' + value

    assignment = type_assignment | value_assignment
    assignment_list = OneOrMore(assignment)

    assigned_identifier = Optional(object_identifier_value | defined_value)
    global_module_reference = module_reference + assigned_identifier

    symbol = Unique(reference)  # TODO: parameterized reference?
    symbol_list = Group(delimitedList(symbol))
    symbols_from_module = symbol_list + Suppress(FROM) + global_module_reference
    symbols_from_module_list = OneOrMore(symbols_from_module)
    symbols_imported = Optional(symbols_from_module_list)
    exports = Optional(Suppress(EXPORTS) + symbol_list + Suppress(';'))
    imports = Optional(Suppress(IMPORTS) + symbols_imported + Suppress(';'))

    module_body = (exports + imports + assignment_list) | empty
    module_defaults = Suppress(tag_default + extension_default)  # we don't want these in the AST
    module_identifier = module_reference + definitive_identifier
    module_definition = module_identifier + DEFINITIONS + module_defaults + '::=' + BEGIN + module_body + END

    module_definition.ignore(comment)

    # Mark up the parse results with token tags
    identifier.setParseAction(annotate('Identifier'))
    named_number_value.setParseAction(annotate('Value'))
    tag.setParseAction(annotate('Tag'))
    class_.setParseAction(annotate('TagClass'))
    class_number.setParseAction(annotate('TagClassNumber'))
    type_.setParseAction(annotate('Type'))
    simple_type.setParseAction(annotate('SimpleType'))
    choice_type.setParseAction(annotate('ChoiceType'))
    sequence_type.setParseAction(annotate('SequenceType'))
    set_type.setParseAction(annotate('SetType'))
    value_list_type.setParseAction(annotate('ValueListType'))
    bitstring_type.setParseAction(annotate('BitStringType'))
    referenced_type.setParseAction(annotate('ReferencedType'))
    sequenceof_type.setParseAction(annotate('SequenceOfType'))
    setof_type.setParseAction(annotate('SetOfType'))
    named_number.setParseAction(annotate('NamedValue'))
    constraint.setParseAction(annotate('Constraint'))
    size_constraint.setParseAction(annotate('SizeConstraint'))
    component_type.setParseAction(annotate('ComponentType'))
    component_type_optional.setParseAction(annotate('ComponentTypeOptional'))
    component_type_default.setParseAction(annotate('ComponentTypeDefault'))
    component_type_components_of.setParseAction(annotate('ComponentTypeComponentsOf'))
    tagged_type.setParseAction(annotate('TaggedType'))
    named_type.setParseAction(annotate('NamedType'))
    type_assignment.setParseAction(annotate('TypeAssignment'))
    value_assignment.setParseAction(annotate('ValueAssignment'))
    valuereference.setParseAction(annotate('ValueReference'))
    module_reference.setParseAction(annotate('ModuleReference'))
    module_body.setParseAction(annotate('ModuleBody'))
    module_definition.setParseAction(annotate('ModuleDefinition'))
    extension_marker.setParseAction(annotate('ExtensionMarker'))
    name_form.setParseAction(annotate('NameForm'))
    number_form.setParseAction(annotate('NumberForm'))
    name_and_number_form.setParseAction(annotate('NameAndNumberForm'))
    object_identifier_value.setParseAction(annotate('ObjectIdentifierValue'))
    definitive_identifier.setParseAction(annotate('DefinitiveIdentifier'))
    definitive_number_form.setParseAction(annotate('DefinitiveNumberForm'))
    definitive_name_and_number_form.setParseAction(annotate('DefinitiveNameAndNumberForm'))
    imports.setParseAction(annotate('Imports'))
    exports.setParseAction(annotate('Exports'))
    assignment_list.setParseAction(annotate('AssignmentList'))

    return module_definition
Author: johnteslade, Project: asn1ate, Lines: 104, Source: parser.py

Example 14: Word

# Required module import: from pyparsing import OneOrMore [as alias]
# Alternatively: from pyparsing.OneOrMore import setParseAction [as alias]
NO_BRTS = printables.replace('(', '').replace(')', '')
SINGLE = Word(NO_BRTS.replace('*', ''))
WILDCARDS = Optional('*') + SINGLE + Optional('*') + WordEnd(wordChars=NO_BRTS)
QUOTED = quotedString.setParseAction(removeQuotes)

OPER_AND = CaselessLiteral('and')
OPER_OR = CaselessLiteral('or')
OPER_NOT = '-'

TERM = Combine(Optional(Word(alphas).setResultsName('meta') + ':') +
               (QUOTED.setResultsName('query') |
                WILDCARDS.setResultsName('query')))
TERM.setParseAction(create_q)

EXPRESSION = operatorPrecedence(TERM, [
    (OPER_NOT, 1, opAssoc.RIGHT),
    (OPER_OR, 2, opAssoc.LEFT),
    (Optional(OPER_AND, default='and'), 2, opAssoc.LEFT)])
EXPRESSION.setParseAction(union_q)

QUERY = OneOrMore(EXPRESSION) + StringEnd()
QUERY.setParseAction(union_q)


def advanced_search(pattern):
    """
    Parse the grammar of a pattern and build a queryset with it.
    """
    query_parsed = QUERY.parseString(pattern)
    return Entry.published.filter(query_parsed[0]).distinct()
Author: Fantomas42, Project: django-blog-zinnia, Lines: 32, Source: search.py
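
Since create_q/union_q turn the parsed tokens into Django Q objects, the grammar is normally exercised through advanced_search; a hedged example call (the field names are illustrative):

entries = advanced_search('django -draft (title:parsing or category:python)')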

Example 15: Word

# Required module import: from pyparsing import OneOrMore [as alias]
# Alternatively: from pyparsing.OneOrMore import setParseAction [as alias]
NO_BRTS = printables.replace('(', '').replace(')', '')
SINGLE = Word(NO_BRTS.replace('*', ''))
WILDCARDS = Optional('*') + SINGLE + Optional('*') + WordEnd(wordChars=NO_BRTS)
QUOTED = quotedString.setParseAction(removeQuotes)

OPER_AND = CaselessLiteral('and')
OPER_OR = CaselessLiteral('or')
OPER_NOT = '-'

TERM = Combine(Optional(Word(alphas).setResultsName('meta') + ':') +
               (QUOTED.setResultsName('query') |
                WILDCARDS.setResultsName('query')))
TERM.setParseAction(createQ)

EXPRESSION = operatorPrecedence(TERM, [
    (OPER_NOT, 1, opAssoc.RIGHT),
    (OPER_OR, 2, opAssoc.LEFT),
    (Optional(OPER_AND, default='and'), 2, opAssoc.LEFT)])
EXPRESSION.setParseAction(unionQ)

QUERY = OneOrMore(EXPRESSION) + StringEnd()
QUERY.setParseAction(unionQ)


def advanced_search(pattern):
    """Parse the grammar of a pattern
    and build a queryset with it"""
    query_parsed = QUERY.parseString(pattern)
    return Node.published.filter(query_parsed[0]).distinct()
Author: Big-Data, Project: gnowsys-studio, Lines: 31, Source: search.py


Note: the pyparsing.OneOrMore.setParseAction examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code; do not reproduce without permission.