本文整理汇总了Python中pyparsing.Regex类的典型用法代码示例。如果您正苦于以下问题:Python Regex类的具体用法?Python Regex怎么用?Python Regex使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Regex类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: define_identifier
def define_identifier(self):
    """
    Return the syntax definition for an identifier.

    An identifier is one or more word characters (plus the grammar's
    spacing token), must not begin with a digit, and may be prefixed
    by a namespace path.
    """
    # Collect every Unicode digit in the BMP so a leading digit can be
    # forbidden with a negative lookahead expression:
    digit_chars = "".join(unichr(code) for code in xrange(0x10000)
                          if unichr(code).isdigit())
    leading_digit = Regex("[%s]" % digit_chars, re.UNICODE)
    spacing = re.escape(self._grammar.get_token("identifier_spacing"))
    word_expr = Regex("[\w%s]+" % spacing, re.UNICODE)
    # An individual identifier: word characters, not starting with a digit.
    single_id = Combine(~leading_digit + word_expr)
    single_id.setName("individual_identifier")
    # A namespace is zero or more identifiers, each followed by the
    # grammar's namespace separator token.
    ns_separator = Suppress(self._grammar.get_token("namespace_separator"))
    ns_path = Group(ZeroOrMore(single_id + ns_separator))
    ns_path.setName("namespace")
    # The full identifier: optional namespace plus the identifier proper.
    full_id = Combine(ns_path.setResultsName("namespace_parts") +
                      single_id.setResultsName("identifier"))
    full_id.setName("full_identifier")
    return full_id
示例2: nexus_iter
def nexus_iter(infile):
import pyparsing
pyparsing.ParserElement.enablePackrat()
from pyparsing import Word, Literal, QuotedString, CaselessKeyword, \
OneOrMore, Group, Optional, Suppress, Regex, Dict
## beginblock = Suppress(CaselessKeyword("begin") +
## CaselessKeyword("trees") + ";")
## endblock = Suppress((CaselessKeyword("end") |
## CaselessKeyword("endblock")) + ";")
comment = Optional(Suppress("[&") + Regex(r'[^]]+') + Suppress("]"))
## translate = CaselessKeyword("translate").suppress()
name = Word(string.letters+string.digits+"_.") | QuotedString("'")
## ttrec = Group(Word(string.digits).setResultsName("number") +
## name.setResultsName("name") +
## Optional(",").suppress())
## ttable = Group(translate + OneOrMore(ttrec) + Suppress(";"))
newick = Regex(r'[^;]+;')
tree = (CaselessKeyword("tree").suppress() +
Optional("*").suppress() +
name.setResultsName("tree_name") +
comment.setResultsName("tree_comment") +
Suppress("=") +
comment.setResultsName("root_comment") +
newick.setResultsName("newick"))
## treesblock = Group(beginblock +
## Optional(ttable.setResultsName("ttable")) +
## Group(OneOrMore(tree)) +
## endblock)
def not_begin(s): return s.strip().lower() != "begin trees;"
def not_end(s): return s.strip().lower() not in ("end;", "endblock;")
def parse_ttable(f):
ttable = {}
while True:
s = f.next().strip()
if not s: continue
if s.lower() == ";": break
if s[-1] == ",": s = s[:-1]
k, v = s.split()
ttable[k] = v
if s[-1] == ";": break
return ttable
# read lines between "begin trees;" and "end;"
f = itertools.takewhile(not_end, itertools.dropwhile(not_begin, infile))
s = f.next().strip().lower()
if s != "begin trees;":
print sys.stderr, "Expecting 'begin trees;', got %s" % s
raise StopIteration
ttable = {}
while True:
try: s = f.next().strip()
except StopIteration: break
if not s: continue
if s.lower() == "translate":
ttable = parse_ttable(f)
print "ttable: %s" % len(ttable)
elif s.split()[0].lower()=='tree':
match = tree.parseString(s)
yield nexus.Newick(match, ttable)
示例3: parseDate
def parseDate(self, dateString):
    """Parse a "YYYYMMDDHH:MM"-style timestamp into named components.

    Returns a dict with string values under the keys "year", "month",
    "day", "hours" and "minutes".  Raises a pyparsing parse exception
    when *dateString* does not match the grammar.
    """
    # Idiom fix: raw strings (r"\d") for the regex fragments, instead of
    # relying on Python passing the unknown "\d" escape through unchanged.
    dateGrammar = Regex(r"\d{4}")("year") + Regex(r"\d{2}")("month") +\
                  Regex(r"\d{2}")("day") + Regex(r"\d{2}")("hours") +\
                  Suppress(":") + Regex(r"\d{2}")("minutes")
    results = dateGrammar.parseString(dateString)
    return {
        "year" : results["year"],
        "month" : results["month"],
        "day" : results["day"],
        "hours" : results["hours"],
        "minutes" : results["minutes"],
    }
示例4: _make_grammar
def _make_grammar(self):
    """Build and return the pyparsing grammar for the search syntax."""
    from pyparsing import (QuotedString, ZeroOrMore, Combine,
                           Literal, Optional, OneOrMore,
                           Regex, CaselessKeyword)

    # Translation table for recognised backslash escapes; anything not
    # listed falls through to "drop the backslash, keep the character".
    escape_map = {
        '\\\\': "\\",
        "\\'": "'",
        '\\"': '"',
        '\\f': "\f",
        '\\n': "\n",
        '\\r': "\r",
        '\\t': "\t",
        '\\ ': " ",
    }

    def escape_handler(s, loc, toks):
        token = toks[0]
        return escape_map.get(token, token[1:])

    escape = Combine(Regex(r'\\.')).setParseAction(escape_handler)
    # A word is any run of non-space characters, with escapes resolved.
    word = Combine(OneOrMore(escape | Regex(r'[^\s\\]+')))
    whitespace = Regex(r'\s+').suppress()
    quotedstring = Combine(OneOrMore(QuotedString('"', escChar='\\') |
                                     QuotedString("'", escChar='\\')))
    # "name:value" pairs; the value may itself be quoted.
    command = Regex(r'[^\s:]+') + Literal(":").suppress() + (quotedstring | word)
    include = quotedstring | command | word
    # A leading "-" or "^" negates the following term.
    exclude = (Literal("-") | Literal("^")).suppress() + (quotedstring | command | word)
    or_keyword = CaselessKeyword("or")
    and_keyword = CaselessKeyword("and")
    keyword = or_keyword | and_keyword
    argument = (keyword | exclude | include)
    expr = ZeroOrMore(Optional(whitespace) + argument)
    # Wrap each matched construct in its expression node type:
    command.setParseAction(CommandExpr)
    include.setParseAction(IncludeExpr)
    exclude.setParseAction(ExcludeExpr)
    or_keyword.setParseAction(OrKeywordExpr)
    and_keyword.setParseAction(AndKeywordExpr)
    return expr
示例5: __init__
def __init__(self, EvaluateVariableChild=None, EvaluateNumberChild=None):
    """Build the comparison/boolean expression grammar.

    EvaluateVariableChild / EvaluateNumberChild let callers override how
    variables and numbers are evaluated; they default to EvaluateVariable
    and EvaluateNumber.  The resulting grammar is stored on
    self.boolExpression.
    """
    EvaluateVariableChild = EvaluateVariableChild or EvaluateVariable
    EvaluateNumberChild = EvaluateNumberChild or EvaluateNumber
    # What is a float number: optional sign, digits, optional fraction
    # and exponent.
    floatNumber = Regex(r'[-]?\d+(\.\d*)?([eE][-+]?\d+)?')
    # A variable is a combination of letters, numbers, and underscore.
    variable = Word(alphanums + "_")
    # A sign is plus or minus.
    # NOTE(review): signOp is defined but never used below — confirm.
    signOp = oneOf('+ -')
    # An operand is a variable or a floating point number.
    operand = floatNumber ^ variable
    # When a floatNumber is found, parse it with EvaluateNumberChild.
    floatNumber.setParseAction(EvaluateNumberChild)
    # When a variable is found, parse it with the EvaluateVariableChild
    # or EvaluateVariable.
    variable.setParseAction(EvaluateVariableChild)
    # Comparisons include lt, le, gt, ge, eq, ne.
    comparisonOp = oneOf("< <= > >= == !=")
    # Negation of the boolean is !.
    notOp = oneOf("!")
    # An expression is a either a comparison or
    # a NOT operation (where NOT a is essentially (a == False)).
    comparisonExpression = operatorPrecedence(operand,
                                              [
                                               (comparisonOp,
                                                2,
                                                opAssoc.LEFT,
                                                EvaluateComparison
                                                ),
                                               (notOp,
                                                1,
                                                opAssoc.RIGHT,
                                                EvaluateNot
                                                ),
                                               ])
    # Boolean logic of AND or OR.
    boolOp = oneOf("& |")
    # A bool expression contains a nested bool expression or a comparison,
    # joined with a boolean operation.
    # NOTE(review): boolExpression is a Forward that is never completed
    # with "<<", so boolPossible can only ever match via
    # comparisonExpression — confirm this is intentional.
    boolExpression = Forward()
    boolPossible = boolExpression | comparisonExpression
    self.boolExpression = operatorPrecedence(boolPossible,
                                             [
                                              (boolOp,
                                               2,
                                               opAssoc.RIGHT,
                                               EvaluateOrAnd
                                               ),
                                              ])
    return
示例6: parse_treesblock
def parse_treesblock(infile):
    """Yield a Newick object for each "tree" statement read from *infile*."""
    import string
    from pyparsing import Optional, Word, Regex, CaselessKeyword, Suppress
    from pyparsing import QuotedString
    # Optional "[&...]" comment after the tree name or the "=" sign.
    comment = Optional(Suppress("[&") + Regex(r'[^]]+') + Suppress("]"))
    name = Word(alphanums+"_") | QuotedString("'")
    # Everything up to and including the terminating ";".
    newick = Regex(r'[^;]+;')
    tree = (CaselessKeyword("tree").suppress() +
            Optional("*").suppress() +
            name.setResultsName("tree_name") +
            comment.setResultsName("tree_comment") +
            Suppress("=") +
            comment.setResultsName("root_comment") +
            newick.setResultsName("newick"))

    def parse_ttable(stream):
        # Collect "number name" pairs until the terminating ";".
        table = {}
        while True:
            line = stream.next().strip()
            if line.lower() == ";":
                break
            if line[-1] in ",;":
                line = line[:-1]
            number, label = line.split()
            table[number] = label
            if line[-1] == ";":
                break
        return table

    table = {}
    while True:
        try:
            line = infile.next().strip()
        except StopIteration:
            break
        if line.lower() == "translate":
            table = parse_ttable(infile)
        else:
            result = tree.parseString(line)
            yield Newick(result, table)
示例7: getkw_bnf
def getkw_bnf(self):
    """Construct and return the pyparsing BNF for the getkw input format.

    Side effects: parse actions are bound to self.conv_* / store_* /
    add_sect / pop_sect, so running the returned parser mutates this
    object's state.
    """
    # Punctuation, suppressed from the parse results:
    sect_begin = Literal("{").suppress()
    sect_end = Literal("}").suppress()
    array_begin = Literal("[").suppress()
    array_end = Literal("]").suppress()
    tag_begin = Literal("<").suppress()
    tag_end = Literal(">").suppress()
    eql = Literal("=").suppress()
    dmark = Literal('$').suppress()
    end_data=Literal('$end').suppress()
    # Characters allowed in unquoted values.
    # Bug fix: the scraped source contained "[email protected]" here — an
    # email-obfuscation artifact that corrupted the "?@" characters.
    prtable = alphanums+r'!$%&*+-./<>?@^_|~'
    # Scalar value types: integer, float, boolean-like literal.
    ival=Regex('[-]?\d+')
    dval=Regex('-?\d+\.\d*([eE]?[+-]?\d+)?')
    lval=Regex('([Yy]es|[Nn]o|[Tt]rue|[Ff]alse|[Oo]n|[Oo]ff)')
    # Helper definitions: any scalar, quoted or bare.
    kstr= quotedString.setParseAction(removeQuotes) ^ \
          dval ^ ival ^ lval ^ Word(prtable)
    name = Word(alphas+"_",alphanums+"_")
    # "[...]" array of scalars, possibly spanning lines.
    vec=array_begin+delimitedList(dval ^ ival ^ lval ^ Word(prtable) ^ \
                                  Literal("\n").suppress() ^ \
                                  quotedString.setParseAction(removeQuotes))+array_end
    sect=name+sect_begin
    tag_sect=name+Group(tag_begin+name+tag_end)+sect_begin
    # Grammar
    keyword = name + eql + kstr
    vector = name + eql + vec
    # "$name ... $end" raw data block.
    data=Combine(dmark+name)+SkipTo(end_data)+end_data
    section=Forward()
    sect_def=(sect | tag_sect ) #| vec_sect)
    input=section | data | vector | keyword
    section << sect_def+ZeroOrMore(input) + sect_end
    # Parsing actions (state is accumulated on self):
    ival.setParseAction(self.conv_ival)
    dval.setParseAction(self.conv_dval)
    lval.setParseAction(self.conv_lval)
    keyword.setParseAction(self.store_key)
    vector.setParseAction(self.store_vector)
    data.setParseAction(self.store_data)
    sect.setParseAction(self.add_sect)
    tag_sect.setParseAction(self.add_sect)
    sect_end.setParseAction(self.pop_sect)
    bnf=ZeroOrMore(input) + StringEnd().setFailAction(parse_error)
    bnf.ignore(pythonStyleComment)
    return bnf
示例8: make_sexp_parser
def make_sexp_parser ():
    """
    Return a simple parser for nested lists of real numbers written as
    lisp-style s-expressions; round parens () delimit the lists.
    """
    # Punctuation literals (note round parens):
    lpar, rpar = Suppress ("("), Suppress (")")
    # A real number token, converted to float on match:
    number = Regex (r"[+-]?\d+\.\d*([eE][+-]?\d+)?")
    number.setParseAction (lambda tokens: float (tokens[0]))
    # An s-expression is a real or a parenthesised list of s-expressions:
    sexp = Forward ()
    nested = Group (lpar + ZeroOrMore (sexp) + rpar)
    sexp << (number | nested)

    def parse (text):
        return sexp.parseString (text)[0]
    return parse
示例9: ListParser
def ListParser():
    """
    Build a parser for list columns, where each list is composed of
    pairs of values, e.g. "[(1, 2.5), (3, 4)]".

    Returns a callable turning such a string into a list of float
    2-tuples; malformed input raises ValueError.
    """
    number = Regex(r'[-+]?[0-9]+(?:\.[0-9]*)?(?:e[-+]?[0-9]+)?', IGNORECASE)
    number.setParseAction(lambda toks: float(toks[0]))
    # A "(x, y)" pair, collapsed to a tuple:
    pair = Suppress('(') + number + Suppress(',') + number + Suppress(')')
    pair.setParseAction(tuple)
    items = Suppress('[') + delimitedList(pair) + Suppress(']')
    items.setParseAction(list)

    def parse(text):
        try:
            return items.parseString(text).asList()
        except ParseBaseException as err:
            raise ValueError(err)
    return parse
示例10: translate
def translate(self, text, filename):
    """Translate JSP-style template *text* into Python module source.

    Recognises <%@ directives, <%! declarations, <%= expressions and
    <% scriptlets (all closed by %>); everything else is plain text.
    Each construct is compiled as a parse-action side effect into the
    declaration/block/body line lists, which are then joined and
    returned as the generated source string.
    """
    self.source = text
    # presumably self.super is set by compile_directive when the template
    # declares a parent — confirm against compile_directive.
    self.super = None
    self.inheritance = 0
    self.declaration_lines = ['inheritance = 0']
    self.block_lines = []
    self.body_lines = ['def body():']
    self.target_lines = self.body_lines
    self.indent = 1
    # Grammar for the four template constructs, all terminated by "%>":
    template_close = Literal('%>')
    white = White()
    attribute = Word(alphanums + '_') + Literal('=') + QuotedString('"') + Optional(white)
    directive = "<%@" + Optional(white) + Word(alphanums + '_') + white + ZeroOrMore(attribute) + template_close
    declaration = "<%!" + SkipTo(template_close) + template_close
    expression = "<%=" + SkipTo(template_close) + template_close
    scriptlet = '<%' + SkipTo(template_close) + template_close
    template_text = directive | declaration | expression | scriptlet
    # Plain text: any run of characters not starting a "<%" construct.
    plain_text = Regex(r'((?!<%).|\s)+', re.MULTILINE)
    body = template_text | plain_text
    lit = OneOrMore(body)
    # Compilation happens as a side effect while parsing:
    directive.setParseAction(self.compile_directive)
    declaration.setParseAction(self.compile_declaration)
    expression.setParseAction(self.compile_expression)
    scriptlet.setParseAction(self.compile_scriptlet)
    plain_text.setParseAction(self.compile_plain_text)
    # Whitespace is significant in template output:
    lit.leaveWhitespace()
    lit.parseString(self.source)
    translated = '\n' + '\n'.join(self.declaration_lines + ['\n'] + self.block_lines + ['\n'] + self.body_lines)
    # Prepend the parent template's source when inheritance was declared.
    if self.super:
        translated = self.super.module_source + translated
    return translated
示例11: normalize_ip
return [(t[0][0].port, t[0][1].port)]
def normalize_ip(t):
    # Normalize a parsed IP token to "ip/mask"; a missing mask means a
    # /32 host address.
    suffix = str(t.mask.mask) if t.mask else "32"
    return t.ip + "/" + suffix
port = Group(Word(nums).setParseAction(to_int)('port'))
port_range = Group((port + Word("-").suppress() + port)('range'))
normalized_port_range = (port ^ port_range).setParseAction(to_port_range)
ports = delimitedList(normalized_port_range)('ports')
# IP addresses, name of another group, or sg-*
security_group = Regex("sg-[\w\d]+")
group_name = Regex("[\w\d\-]+")
mask = Word("/") + Word(nums).setParseAction(to_int)('mask')
ip= (Combine(Word(nums) + ('.' + Word(nums))*3)('ip') + Optional(mask)('mask')).setParseAction(normalize_ip)
parser = Optional(protocol)('protocol') + \
Optional(port_) + \
ports + \
(ip.setResultsName('ip_and_mask') ^ security_group.setResultsName('security_group') ^ group_name('group_name'))
class Rule(object):
def __init__(self, protocol, from_port, to_port, address=None, group=None, group_name=None):
"""constructs a new rule
示例12: to_string
# val = val.replace(".", "\\.")
elif val.startswith('`') and val.endswith('`'):
val = "'" + val[1:-1].replace("``","`") + "'"
elif val.startswith("+"):
val = val[1:]
un = ast.literal_eval(val)
return un
def to_string(instring, tokensStart, retTokens):
    # Unwrap a single-quoted SQL string token: a doubled quote ('') is
    # re-written as an escaped quote so ast.literal_eval can decode it.
    token = retTokens[0]
    inner = token[1:-1].replace("''", "\\'")
    return {"literal": ast.literal_eval("'" + inner + "'")}
# NUMBERS
# Signed decimal with optional fraction/exponent, and plain integer;
# both are converted by unquote.
realNum = Regex(r"[+-]?(\d+\.\d*|\.\d+)([eE][+-]?\d+)?").addParseAction(unquote)
intNum = Regex(r"[+-]?\d+([eE]\+?\d+)?").addParseAction(unquote)
# STRINGS, NUMBERS, VARIABLES
# Single-quoted SQL string ('' or \x escapes inside) -> {"literal": ...}.
sqlString = Regex(r"\'(\'\'|\\.|[^'])*\'").addParseAction(to_string)
# Double-quoted and backtick-quoted identifiers, unquoted in place.
identString = Regex(r'\"(\"\"|\\.|[^"])*\"').addParseAction(unquote)
mysqlidentString = Regex(r'\`(\`\`|\\.|[^`])*\`').addParseAction(unquote)
# Dotted identifier path (e.g. db.table.*), excluding reserved words.
ident = Combine(~RESERVED + (delimitedList(Literal("*") | Word(alphas + "_", alphanums + "_$") | identString | mysqlidentString, delim=".", combine=True))).setName("identifier")
# EXPRESSIONS
expr = Forward()
# CASE
case = (
CASE +
Group(ZeroOrMore((WHEN + expr("when") + THEN + expr("then")).addParseAction(to_when_call)))("case") +
示例13: define_dot_parser
def define_dot_parser(self):
"""Define dot grammar
Based on the grammar http://www.graphviz.org/doc/info/lang.html
"""
# punctuation
colon = Literal(":")
lbrace = Suppress("{")
rbrace = Suppress("}")
lbrack = Suppress("[")
rbrack = Suppress("]")
lparen = Literal("(")
rparen = Literal(")")
equals = Suppress("=")
comma = Literal(",")
dot = Literal(".")
slash = Literal("/")
bslash = Literal("\\")
star = Literal("*")
semi = Suppress(";")
at = Literal("@")
minus = Literal("-")
pluss = Suppress("+")
# keywords
strict_ = CaselessLiteral("strict")
graph_ = CaselessLiteral("graph")
digraph_ = CaselessLiteral("digraph")
subgraph_ = CaselessLiteral("subgraph")
node_ = CaselessLiteral("node")
edge_ = CaselessLiteral("edge")
punctuation_ = "".join( [ c for c in string.punctuation if c not in '_' ] ) +string.whitespace
# token definitions
identifier = Word(alphanums + "_" ).setName("identifier")
#double_quoted_string = QuotedString('"', multiline=True,escChar='\\',
# unquoteResults=True) # dblQuotedString
double_quoted_string = Regex(r'\"(?:\\\"|\\\\|[^"])*\"', re.MULTILINE)
double_quoted_string.setParseAction(removeQuotes)
quoted_string = Combine(double_quoted_string+
Optional(OneOrMore(pluss+double_quoted_string)),adjacent=False)
alphastring_ = OneOrMore(CharsNotIn(punctuation_))
def parse_html(s, loc, toks):
return '<<%s>>' % ''.join(toks[0])
opener = '<'
closer = '>'
try:
html_text = pyparsing.nestedExpr( opener, closer,
(( CharsNotIn(
opener + closer ).setParseAction( lambda t:t[0] ))
)).setParseAction(parse_html)
except:
log.debug('nestedExpr not available.')
log.warning('Old version of pyparsing detected. Version 1.4.8 or '
'later is recommended. Parsing of html labels may not '
'work properly.')
html_text = Combine(Literal("<<") + OneOrMore(CharsNotIn(",]")))
ID = ( alphastring_ | html_text |
quoted_string | #.setParseAction(strip_quotes) |
identifier ).setName("ID")
float_number = Combine(Optional(minus) +
OneOrMore(Word(nums + "."))).setName("float_number")
righthand_id = (float_number | ID ).setName("righthand_id")
port_angle = (at + ID).setName("port_angle")
port_location = ((OneOrMore(Group(colon + ID)) |
Group(colon + lparen + ID + comma + ID + rparen))).setName("port_location")
port = Combine((Group(port_location + Optional(port_angle)) |
Group(port_angle + Optional(port_location)))).setName("port")
node_id = (ID + Optional(port))
a_list = OneOrMore(ID + Optional(equals + righthand_id) +
Optional(comma.suppress())).setName("a_list")
attr_list = OneOrMore(lbrack + Optional(a_list) +
rbrack).setName("attr_list").setResultsName('attrlist')
attr_stmt = ((graph_ | node_ | edge_) + attr_list).setName("attr_stmt")
edgeop = (Literal("--") | Literal("->")).setName("edgeop")
stmt_list = Forward()
graph_stmt = (lbrace + Optional(stmt_list) +
rbrace + Optional(semi) ).setName("graph_stmt")
edge_point = Forward()
#.........这里部分代码省略.........
示例14: formula_grammar
def formula_grammar(table):
"""
Construct a parser for molecular formulas.
:Parameters:
*table* = None : PeriodicTable
If table is specified, then elements and their associated fields
will be chosen from that periodic table rather than the default.
:Returns:
*parser* : pyparsing.ParserElement.
The ``parser.parseString()`` method returns a list of
pairs (*count,fragment*), where fragment is an *isotope*,
an *element* or a list of pairs (*count,fragment*).
"""
# Recursive
composite = Forward()
mixture = Forward()
# whitespace and separators
space = Optional(White().suppress())
separator = space+Literal('+').suppress()+space
# Lookup the element in the element table
symbol = Regex("[A-Z][a-z]*")
symbol = symbol.setParseAction(lambda s,l,t: table.symbol(t[0]))
# Translate isotope
openiso = Literal('[').suppress()
closeiso = Literal(']').suppress()
isotope = Optional(~White()+openiso+Regex("[1-9][0-9]*")+closeiso,
default='0')
isotope = isotope.setParseAction(lambda s,l,t: int(t[0]) if t[0] else 0)
# Translate ion
openion = Literal('{').suppress()
closeion = Literal('}').suppress()
ion = Optional(~White()
+openion
+Regex("([1-9][0-9]*)?[+-]")
+closeion,
default='0+')
ion = ion.setParseAction(lambda s,l,t: int(t[0][-1]+(t[0][:-1] if len(t[0])>1 else '1')))
# Translate counts
fract = Regex("(0|[1-9][0-9]*|)([.][0-9]*)")
fract = fract.setParseAction(lambda s,l,t: float(t[0]) if t[0] else 1)
whole = Regex("[1-9][0-9]*")
whole = whole.setParseAction(lambda s,l,t: int(t[0]) if t[0] else 1)
count = Optional(~White()+(fract|whole),default=1)
# Convert symbol,isotope,ion,count to (count,isotope)
element = symbol+isotope+ion+count
def convert_element(string,location,tokens):
#print "convert_element received",tokens
symbol,isotope,ion,count = tokens[0:4]
if isotope != 0: symbol = symbol[isotope]
if ion != 0: symbol = symbol.ion[ion]
return (count,symbol)
element = element.setParseAction(convert_element)
# Convert "count elements" to a pair
implicit_group = count+OneOrMore(element)
def convert_implicit(string,location,tokens):
#print "implicit",tokens
count = tokens[0]
fragment = tokens[1:]
return fragment if count==1 else (count,fragment)
implicit_group = implicit_group.setParseAction(convert_implicit)
# Convert "(composite) count" to a pair
opengrp = space + Literal('(').suppress() + space
closegrp = space + Literal(')').suppress() + space
explicit_group = opengrp + composite + closegrp + count
def convert_explicit(string,location,tokens):
#print "explicit",tokens
count = tokens[-1]
fragment = tokens[:-1]
return fragment if count == 1 else (count,fragment)
explicit_group = explicit_group.setParseAction(convert_explicit)
# Build composite from a set of groups
group = implicit_group | explicit_group
implicit_separator = separator | space
composite << group + ZeroOrMore(implicit_separator + group)
density = Literal('@').suppress() + count + Optional(Regex("[ni]"),default='i')
compound = composite + Optional(density,default=None)
def convert_compound(string,location,tokens):
#print "compound",tokens
if tokens[-1] is None:
return Formula(structure=_immutable(tokens[:-1]))
elif tokens[-1] == 'n':
return Formula(structure=_immutable(tokens[:-2]), natural_density=tokens[-2])
else:
return Formula(structure=_immutable(tokens[:-2]), density=tokens[-2])
compound = compound.setParseAction(convert_compound)
#.........这里部分代码省略.........
示例15: map
ParserElement,
)
ParserElement.enablePackrat()
COLON, LBRACK, RBRACK, LBRACE, RBRACE, TILDE, CARAT = map(Literal, ":[]{}~^")
LPAR, RPAR = map(Suppress, "()")
and_ = CaselessKeyword("AND")
or_ = CaselessKeyword("OR")
not_ = CaselessKeyword("NOT")
to_ = CaselessKeyword("TO")
keyword = and_ | or_ | not_
expression = Forward()
valid_word = Regex(r'([a-zA-Z0-9*_+.-]|\\[!(){}\[\]^"~*?\\:])+').setName("word")
valid_word.setParseAction(lambda t: t[0].replace("\\\\", chr(127)).replace("\\", "").replace(chr(127), "\\"))
string = QuotedString('"')
required_modifier = Literal("+")("required")
prohibit_modifier = Literal("-")("prohibit")
integer = Regex(r"\d+").setParseAction(lambda t: int(t[0]))
proximity_modifier = Group(TILDE + integer("proximity"))
number = Regex(r"\d+(\.\d+)?").setParseAction(lambda t: float(t[0]))
fuzzy_modifier = TILDE + Optional(number, default=0.5)("fuzzy")
term = Forward()
field_name = valid_word.copy().setName("fieldname")
incl_range_search = Group(LBRACK + term("lower") + to_ + term("upper") + RBRACK)
excl_range_search = Group(LBRACE + term("lower") + to_ + term("upper") + RBRACE)