This article collects typical usage examples of Python's re.finditer method. If you are wondering what re.finditer does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the re module, where this method lives.
The following 15 code examples of re.finditer are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
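Before diving in, here is a minimal sketch (with a made-up pattern and string) of what re.finditer returns: a lazy iterator of match objects, each exposing group(), start() and end().

import re

# One match object per non-overlapping match, scanning left to right.
for m in re.finditer(r"\d+", "order 12, item 345"):
    print(m.group(0), m.start(), m.end())
# prints: 12 6 8, then 345 15 18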
Example 1: tryAlignExact
# Required import: import re
# Or: from re import finditer
# Also assumes: RemapAlignment and the aligner object come from the surrounding project
def tryAlignExact(query, revquery, target, aligner):
    f_results = [m for m in re.finditer(query, target)]
    r_results = [m for m in re.finditer(revquery, target)]
    if len(f_results) > 0:
        aln = RemapAlignment(f_results[0], query, aligner.match)
        strand = "+"
    elif len(r_results) > 0:
        aln = RemapAlignment(r_results[0], revquery, aligner.match)
        strand = "-"
    else:
        return None
    if len(f_results) + len(r_results) > 1:
        aln.score2 = aln.score
    return strand, aln
Example 2: normalize
# Required import: import re
# Or: from re import finditer
# Also assumes: import functools as ft
def normalize(md):
    '''Normalize anchors.'''
    def on_match(link):
        desc = link.group(1)
        old = link.group(2)
        href = (link.group(2)
                .lower()
                .replace('%20', '-')
                .replace(" ", "-")
                .replace("~", "")
                .replace(".", ""))
        old, new = f'[{desc}]({old})', f'[{desc}]({href})'
        print(old, new)
        return old, new

    replacers = set((on_match(x) for x in re.finditer(r'\[([^\]\[]*)\]\((#[^\)]*)\)', md)))
    return ft.reduce(lambda md, x: md.replace(x[0], x[1]), replacers, md)
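As a quick, hedged check of Example 2 (the input string is made up, and ft is assumed to be functools as noted above):

import re
import functools as ft

print(normalize("[My Section](#My Section)"))
# on_match prints the old/new pair; the result is "[My Section](#my-section)"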
Example 3: _locate_gadgets
# Required import: import re
# Or: from re import finditer
# Also assumes: import capstone as cs
def _locate_gadgets(self, section, terminals, gadget_type):
    disassembler = cs.Cs(cs.CS_ARCH_X86, cs.CS_MODE_32)
    for terminal in terminals:
        matches = [match.start() for match in re.finditer(terminal[0],
                                                          section["data"])]
        for index in matches:
            for i in range(self._options.depth):
                gadget = ""
                instructions = disassembler.disasm_lite(
                    section["data"][index-i:index+terminal[1]],
                    section["vaddr"]+index)
                for instruction in instructions:
                    gadget += (str(instruction[2]) + " " +
                               str(instruction[3]) + " ; ")
                if gadget:
                    # Collapse the double space left when an instruction has no operands.
                    gadget = gadget.replace("  ", " ")
                    gadget = gadget[:-3]
                    self._gadgets += [{"vaddr": section["vaddr"]+index-i,
                                       "insts": gadget,
                                       "gadget_type": gadget_type}]
Example 4: ParseNestedParen
# Required import: import re
# Or: from re import finditer
def ParseNestedParen(string, level):
    """
    Generate strings contained in nested (), indexing i = level
    """
    if len(re.findall(r"\(", string)) == len(re.findall(r"\)", string)):
        LeftRightIndex = [x for x in zip(
            [Left.start()+1 for Left in re.finditer(r'\(', string)],
            reversed([Right.start() for Right in re.finditer(r'\)', string)]))]
    elif len(re.findall(r"\(", string)) > len(re.findall(r"\)", string)):
        return ParseNestedParen(string + ')', level)
    elif len(re.findall(r"\(", string)) < len(re.findall(r"\)", string)):
        return ParseNestedParen('(' + string, level)
    else:
        return 'fail'
    return [string[LeftRightIndex[level][0]:LeftRightIndex[level][1]]]
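A small illustration of Example 4 with a made-up input; level selects the nesting depth whose contents are returned:

print(ParseNestedParen("a(b(c)d)e", 0))   # ['b(c)d']
print(ParseNestedParen("a(b(c)d)e", 1))   # ['c']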
Example 5: formatter_parser
# Required import: import re
# Or: from re import finditer
def formatter_parser(*args, **kw):
    """parse the argument as a format string"""
    assert len(args) == 1
    assert isinstance(args[0], str)
    _result = []
    for _match in re.finditer(r"([^{]*)?(\{[^}]*\})?", args[0]):
        _pre, _fmt = _match.groups()
        if _fmt is None:
            _result.append((_pre, None, None, None))
        elif _fmt == '{}':
            _result.append((_pre, '', '', None))
        else:
            _m = re.match(r"\{([^!]*)!?(.*)?\}", _fmt)
            # group(1) is the field name, group(2) the conversion flag;
            # the original called .groups(n), which returns the whole tuple.
            _name = _m.group(1)
            _flags = _m.group(2)
            _result.append((_pre, _name, _flags, None))
    return _result
Example 6: test_finditer
# Required import: import re
# Or: from re import finditer
def test_finditer(self):
    iter = re.finditer(r":+", "a:b::c:::d")
    self.assertEqual([item.group(0) for item in iter],
                     [":", "::", ":::"])
    pat = re.compile(r":+")
    iter = pat.finditer("a:b::c:::d", 1, 10)
    self.assertEqual([item.group(0) for item in iter],
                     [":", "::", ":::"])
    pat = re.compile(r":+")
    iter = pat.finditer("a:b::c:::d", pos=1, endpos=10)
    self.assertEqual([item.group(0) for item in iter],
                     [":", "::", ":::"])
    pat = re.compile(r":+")
    iter = pat.finditer("a:b::c:::d", endpos=10, pos=1)
    self.assertEqual([item.group(0) for item in iter],
                     [":", "::", ":::"])
    pat = re.compile(r":+")
    iter = pat.finditer("a:b::c:::d", pos=3, endpos=8)
    self.assertEqual([item.group(0) for item in iter],
                     ["::", "::"])
Example 7: _ieer_read_text
# Required import: import re
# Or: from re import finditer
# Also assumes: Tree (e.g. nltk's Tree) and a precompiled _IEER_TYPE_RE from the surrounding module
def _ieer_read_text(s, top_node):
    stack = [Tree(top_node, [])]
    for piece_m in re.finditer(r'<[^>]+>|[^\s<]+', s):
        piece = piece_m.group()
        try:
            if piece.startswith('<b_'):
                m = _IEER_TYPE_RE.match(piece)
                if m is None:
                    print('XXXX', piece)
                chunk = Tree(m.group('type'), [])
                stack[-1].append(chunk)
                stack.append(chunk)
            elif piece.startswith('<e_'):
                stack.pop()
            # elif piece.startswith('<'):
            #     print("ERROR:", piece)
            #     raise ValueError  # Unexpected HTML
            else:
                stack[-1].append(piece)
        except (IndexError, ValueError):
            raise ValueError('Bad IEER string (error at character %d)' %
                             piece_m.start())
    if len(stack) != 1:
        raise ValueError('Bad IEER string')
    return stack[0]
Example 8: show_help
# Required import: import re
# Or: from re import finditer
def show_help(self, tab):
    self.helpbox['state'] = 'normal'
    self.helpbox.delete('1.0', 'end')
    for (name, tabstops, text) in self.HELP:
        if name == tab:
            text = text.replace('<<TAGSET>>', '\n'.join(
                ('\t%s\t%s' % item for item in sorted(list(self.tagset.items()),
                 key=lambda t_w: re.match(r'\w+', t_w[0]) and (0, t_w[0]) or (1, t_w[0])))))
            self.helptabs[name].config(**self._HELPTAB_FG_PARAMS)
            self.helpbox.config(tabs=tabstops)
            self.helpbox.insert('1.0', text+'\n'*20)
            C = '1.0 + %d chars'
            for (tag, params) in self.HELP_AUTOTAG:
                pattern = '(?s)(<%s>)(.*?)(</%s>)' % (tag, tag)
                for m in re.finditer(pattern, text):
                    self.helpbox.tag_add('elide',
                                         C % m.start(1), C % m.end(1))
                    self.helpbox.tag_add('tag-%s' % tag,
                                         C % m.start(2), C % m.end(2))
                    self.helpbox.tag_add('elide',
                                         C % m.start(3), C % m.end(3))
        else:
            self.helptabs[name].config(**self._HELPTAB_BG_PARAMS)
    self.helpbox['state'] = 'disabled'
Example 9: _syntax_highlight_grammar
# Required import: import re
# Or: from re import finditer
def _syntax_highlight_grammar(self, grammar):
    if self.top is None: return
    self.grammarbox.tag_remove('comment', '1.0', 'end')
    self.grammarbox.tag_remove('angle', '1.0', 'end')
    self.grammarbox.tag_remove('brace', '1.0', 'end')
    self.grammarbox.tag_add('hangindent', '1.0', 'end')
    for lineno, line in enumerate(grammar.split('\n')):
        if not line.strip(): continue
        m = re.match(r'(\\.|[^#])*(#.*)?', line)
        comment_start = None
        if m.group(2):
            comment_start = m.start(2)
            s = '%d.%d' % (lineno+1, m.start(2))
            e = '%d.%d' % (lineno+1, m.end(2))
            self.grammarbox.tag_add('comment', s, e)
        for m in re.finditer('[<>{}]', line):
            if comment_start is not None and m.start() >= comment_start:
                break
            s = '%d.%d' % (lineno+1, m.start())
            e = '%d.%d' % (lineno+1, m.end())
            if m.group() in '<>':
                self.grammarbox.tag_add('angle', s, e)
            else:
                self.grammarbox.tag_add('brace', s, e)
Example 10: _mark_paragraph_breaks
# Required import: import re
# Or: from re import finditer
def _mark_paragraph_breaks(self, text):
    """Identifies indented text or line breaks as the beginning of
    paragraphs"""
    MIN_PARAGRAPH = 100
    pattern = re.compile("[ \t\r\f\v]*\n[ \t\r\f\v]*\n[ \t\r\f\v]*")
    matches = pattern.finditer(text)

    last_break = 0
    pbreaks = [0]
    for pb in matches:
        if pb.start()-last_break < MIN_PARAGRAPH:
            continue
        else:
            pbreaks.append(pb.start())
            last_break = pb.start()

    return pbreaks
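Example 10 is written as a method but never touches self, so a rough check can pass None in its place; the input below is made up and padded so that the break clears MIN_PARAGRAPH:

# Two ~120-character "paragraphs" separated by a blank line.
text = "a" * 120 + "\n\n" + "b" * 120
print(_mark_paragraph_breaks(None, text))   # [0, 120] -- 0 plus the offset of each detected break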
Example 11: get_ydk_def_names
# Required import: import re
# Or: from re import finditer
# Also assumes: import logging
def get_ydk_def_names(python_ydk_defs):
    """
    Get the Python YDK definition names
    """
    logging.debug('get_ydk_def_names: python_ydk_defs : \n' + python_ydk_defs)
    import re
    ydk_def_names = ""
    for m in re.finditer(r"def \w+()", python_ydk_defs):
        logging.debug('get_ydk_def_names: m.group(0): \n' + m.group(0))
        tmp_str = m.group(0).replace('def ', '')
        ydk_def_names = ydk_def_names + tmp_str + " "
    logging.debug('get_ydk_def_names: ydk_def_names : \n' + ydk_def_names)
    return ydk_def_names
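A quick, hedged check of Example 11 with a made-up source string (logging must be imported for the debug calls):

import logging

src = "def foo():\n    pass\n\ndef bar(x):\n    return x\n"
print(get_ydk_def_names(src))   # "foo bar "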
Example 12: decorate
# Required import: import re
# Or: from re import finditer
def decorate(self, pageOffset=None):
    page = self.decorated.decorate(pageOffset)

    self.PenInterval = self.decorated.PenInterval
    self.brushMap = self.decorated.brushMap
    self.penMap = self.decorated.penMap

    off = self.dataModel.getOffset()

    Match = [(m.start(), m.end()) for m in re.finditer(b'([a-zA-Z0-9\\-\\\\.%*:/? _<>]){4,}', page)]
    for s, e in Match:
        for i in range(e - s):
            idx = off + s + i
            if idx not in self.penMap:
                self.penMap[off + s + i] = self.redPen

    self.page = page
    return self.page
Example 13: get_incremented_filename
# Required import: import re
# Or: from re import finditer
def get_incremented_filename(filename, namestring):
    import re
    min_index = 1
    pattern = r"(.*?)(\s*)(\d*)$"
    basename, space, index_str = re.search(pattern, filename).groups()
    search_pattern = fr"^{re.escape(basename)}\s*(\d+)$"
    if index_str:
        min_index = int(index_str)
        zero_padding = len(index_str) if index_str.startswith("0") else 0
        naming_pattern = basename + space + "{:0" + str(zero_padding) + "d}"
    else:
        naming_pattern = basename + " {:02d}"

    names = re.finditer(search_pattern, namestring, re.I | re.M)
    inds = [int(name.group(1)) for name in names]
    max_index = min_index + len(inds)

    for i in range(min_index, max_index):
        if i not in inds:
            return naming_pattern.format(i)

    return naming_pattern.format(max_index)
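Two hedged calls to Example 13 with made-up names, showing that gaps in the existing numbering are filled first:

print(get_incremented_filename("Layer", "Layer 01\nLayer 03"))   # Layer 02
print(get_incremented_filename("Layer", "Layer 01\nLayer 02"))   # Layer 03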
Example 14: get_pragma_spec
# Required import: import re
# Or: from re import finditer
# Also assumes: from typing import Optional; NpmSpec (e.g. from semantic_version) and PragmaError from the surrounding project
def get_pragma_spec(source: str, path: Optional[str] = None) -> NpmSpec:
    """
    Extracts pragma information from Solidity source code.

    Args:
        source: Solidity source code
        path: Optional path to the source (only used for error reporting)

    Returns: NpmSpec object
    """
    pragma_match = next(re.finditer(r"pragma +solidity([^;]*);", source), None)
    if pragma_match is not None:
        pragma_string = pragma_match.groups()[0]
        pragma_string = " ".join(pragma_string.split())
        return NpmSpec(pragma_string)
    if path:
        raise PragmaError(f"No version pragma in '{path}'")
    raise PragmaError("String does not contain a version pragma")
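Assuming NpmSpec comes from the semantic_version package and PragmaError is the project's own exception, a quick check with a made-up Solidity snippet might look like:

spec = get_pragma_spec("pragma solidity ^0.8.0;\ncontract C {}")
# spec is the NpmSpec built from "^0.8.0"; a missing pragma raises PragmaError instead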
Example 15: _assert_urls_in_text
# Required import: import re
# Or: from re import finditer
def _assert_urls_in_text(text, expected_num, line_url_pattern):
    lines = [line.rstrip() for line in text.split('\n')]
    urls = []
    for line in lines:
        for match in re.finditer(line_url_pattern, line):
            match_groupdict = match.groupdict()
            urls.append(match_groupdict['url'])
    num_of_urls = len(urls)
    msg_format = "Found {num_of_urls} urls instead of {expected_num} in:\n{text}"  # noqa: E501
    msg = msg_format.format(
        num_of_urls=num_of_urls,
        expected_num=expected_num,
        text=text,
    )
    assert num_of_urls == expected_num, msg
    return urls
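Example 15 expects the pattern to define a named group called url; a hedged check with made-up text and pattern:

urls = _assert_urls_in_text("see https://a.example\nand https://b.example",
                            2, r"(?P<url>https?://\S+)")
print(urls)   # ['https://a.example', 'https://b.example']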