This article collects typical usage examples of the Python regex.search method. If you are looking for concrete usage of regex.search, how to call it, or example code, the curated examples below may help. You can also explore further usage examples of the regex module that this method belongs to.
The following section presents 15 code examples of the regex.search method, sorted by popularity by default. You can upvote the examples you find useful; this feedback helps the system recommend better Python code examples.
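Before the examples, here is a minimal, self-contained sketch of how regex.search is typically called. The third-party regex module exposes an API compatible with the standard re module (which is why several examples below import it as re); the pattern and input string here are made up for illustration.

import regex

# regex.search scans the string and returns the first Match object, or None if nothing matches
match = regex.search(r'(\d{4})-(\d{2})-(\d{2})', 'released on 2018-06-13, build 220436')
if match is not None:
    print(match.group(0))  # '2018-06-13' -- the whole match
    print(match.group(1))  # '2018'       -- the first capture group
else:
    print('no match')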
Example 1: prepath_to_spec
# Required module: import regex [as an alias]
# Or: from regex import search [as an alias]
def prepath_to_spec(prepath):
    '''
    Given a prepath, read the correct spec and recover the meta_spec that will return the same prepath for eval lab modes
    example: output/a2c_cartpole_2018_06_13_220436/a2c_cartpole_t0_s0
    '''
    predir, _, prename, _, experiment_ts, ckpt = prepath_split(prepath)
    sidx_res = re.search(r'_s\d+', prename)
    if sidx_res:  # replace the _s0 if any
        prename = prename.replace(sidx_res[0], '')
    spec_path = f'{predir}/{prename}_spec.json'
    # read the spec of prepath
    spec = read(spec_path)
    # recover meta_spec
    trial_index, session_index = prepath_to_idxs(prepath)
    meta_spec = spec['meta']
    meta_spec['experiment_ts'] = experiment_ts
    meta_spec['ckpt'] = ckpt
    meta_spec['experiment'] = 0
    meta_spec['trial'] = trial_index
    meta_spec['session'] = session_index
    check_prepath = get_prepath(spec, unit='session')
    assert check_prepath in prepath, f'{check_prepath}, {prepath}'
    return spec
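For context, a small standalone illustration of the suffix-stripping step above; the sample prepath name is made up:

import regex as re

prename = 'a2c_cartpole_t0_s0'
sidx_res = re.search(r'_s\d+', prename)
if sidx_res:
    # Match objects support indexing: sidx_res[0] is the full matched text ('_s0')
    prename = prename.replace(sidx_res[0], '')
print(prename)  # 'a2c_cartpole_t0'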
Example 2: validate_single_subject
# Required module: import regex [as an alias]
# Or: from regex import search [as an alias]
def validate_single_subject(id, tree):
    """
    No predicate should have more than one subject.
    An xcomp dependent normally has no subject, but in some languages the
    requirement may be weaker: it could have an overt subject if it is
    coreferential with a particular argument of the matrix verb. Hence we do
    not check zero subjects of xcomp dependents at present.
    Furthermore, in some situations we must allow two subjects (but not three or more).
    If a clause acts as a nonverbal predicate of another clause, and if there is
    no copula, then we must attach two subjects to the predicate of the inner
    clause: one is the subject of the inner clause, the other is the subject
    of the outer clause. This could in theory be recursive but in practice it isn't.
    See also issue 34 (https://github.com/UniversalDependencies/tools/issues/34).
    """
    subjects = sorted([x for x in tree['children'][id] if re.search(r"subj", lspec2ud(tree['nodes'][x][DEPREL]))])
    if len(subjects) > 2:
        # We test for more than 2, but in the error message we still say more than 1, so that we do not have to explain the exceptions.
        testlevel = 3
        testclass = 'Syntax'
        testid = 'too-many-subjects'
        testmessage = "Node has more than one subject: %s" % str(subjects)
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
Example 3: try_make_hoster
# Required module: import regex [as an alias]
# Or: from regex import search [as an alias]
def try_make_hoster(cls: Type["Hoster"], url: str,
                    config: Dict[str, str]) -> Optional["Hoster"]:
    """Creates hoster if **url** is matched by its **url_pattern**"""
    if config:
        try:
            klass: Type["Hoster"] = type(
                "Customized" + cls.__name__,
                (cls,),
                {key + "_pattern": val for key, val in config.items()}
            )
        except KeyError:
            logger.debug("Overrides invalid for %s - skipping", cls.__name__)
            return None
    else:
        klass = cls
    match = klass.url_re.search(url)
    if match:
        return klass(url, match)
    return None
Example 4: get_versions
# Required module: import regex [as an alias]
# Or: from regex import search [as an alias]
async def get_versions(self, req, orig_version):
    exclude = set(self.exclude)
    vals = {key: val
            for key, val in self.vals.items()
            if key not in exclude}
    link_pattern = replace_named_capture_group(self.link_pattern_compiled, vals)
    link_re = re.compile(link_pattern)
    result = []
    for url in self.releases_urls:
        # the method must be a coroutine because it awaits the FTP listing
        files = await req.get_ftp_listing(url)
        for fname in files:
            match = link_re.search(fname)
            if match:
                data = match.groupdict()
                data['fn'] = fname
                data['link'] = "ftp://" + vals['host'] + fname
                data['releases_url'] = url
                result.append(data)
    return result
Example 5: match
# Required module: import regex [as an alias]
# Or: from regex import search [as an alias]
def match(self, text):
    """If text matches the pattern, return the variable names specified (%{pattern:variable name})
    in the pattern and their corresponding values. If it does not match, return None.
    Custom patterns can be passed in via custom_patterns (pattern name, pattern regular expression pairs)
    or custom_patterns_dir.
    """
    match_obj = None
    if self.fullmatch:
        match_obj = self.regex_obj.fullmatch(text)
    else:
        match_obj = self.regex_obj.search(text)
    if match_obj is None:
        return None
    matches = match_obj.groupdict()
    for key, match in matches.items():
        try:
            if self.type_mapper[key] == 'int':
                matches[key] = int(match)
            if self.type_mapper[key] == 'float':
                matches[key] = float(match)
        except (TypeError, KeyError):
            pass
    return matches
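As a standalone illustration of the groupdict-plus-type-coercion idea used above, here is a hedged sketch; the pattern, field names, and log line are made up and do not come from the library itself:

import regex as re

# hypothetical pattern with named groups, mimicking a compiled grok expression
regex_obj = re.compile(r'(?P<client>\S+) (?P<status>\d+) (?P<size>\d+)')
type_mapper = {'status': 'int', 'size': 'int'}

match_obj = regex_obj.search('10.0.0.1 200 5120')
if match_obj is not None:
    matches = match_obj.groupdict()
    for key, value in matches.items():
        if type_mapper.get(key) == 'int':
            matches[key] = int(value)
    print(matches)  # {'client': '10.0.0.1', 'status': 200, 'size': 5120}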
Example 6: _load_search_pattern
# Required module: import regex [as an alias]
# Or: from regex import search [as an alias]
def _load_search_pattern(self):
    self.type_mapper = {}
    py_regex_pattern = self.pattern
    while True:
        # find all types specified in the grok pattern
        m = re.findall(r'%{(\w+):(\w+):(\w+)}', py_regex_pattern)
        for n in m:
            self.type_mapper[n[1]] = n[2]
        # replace %{pattern_name:custom_name} (or %{pattern_name:custom_name:type})
        # with the regex wrapped in a named group
        py_regex_pattern = re.sub(r'%{(\w+):(\w+)(?::\w+)?}',
            lambda m: "(?P<" + m.group(2) + ">" + self.predefined_patterns[m.group(1)].regex_str + ")",
            py_regex_pattern)
        # replace %{pattern_name} with the regex
        py_regex_pattern = re.sub(r'%{(\w+)}',
            lambda m: "(" + self.predefined_patterns[m.group(1)].regex_str + ")",
            py_regex_pattern)
        if re.search(r'%{\w+(:\w+)?}', py_regex_pattern) is None:
            break
    self.regex_obj = re.compile(py_regex_pattern)
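A self-contained sketch of the same expansion technique, using a tiny made-up table in place of self.predefined_patterns:

import regex as re

# hypothetical predefined patterns; real grok libraries ship a much larger set
predefined = {'WORD': r'\w+', 'INT': r'\d+'}

pattern = '%{WORD:method} /items/%{INT:item_id}'
# expand %{NAME:field} into a named capture group wrapping the predefined regex
py_pattern = re.sub(r'%{(\w+):(\w+)}',
                    lambda m: "(?P<" + m.group(2) + ">" + predefined[m.group(1)] + ")",
                    pattern)
print(py_pattern)  # (?P<method>\w+) /items/(?P<item_id>\d+)
print(re.search(py_pattern, 'GET /items/42').groupdict())  # {'method': 'GET', 'item_id': '42'}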
Example 7: results_with_valid_urls
# Required module: import regex [as an alias]
# Or: from regex import search [as an alias]
def results_with_valid_urls(results: list, nefarious_settings: NefariousSettings):
    populated_results = []
    for result in results:
        # try and obtain the torrent url (it can redirect to a magnet url)
        try:
            # add a new key to our result object with the traced torrent url
            result['torrent_url'] = result['MagnetUri'] or trace_torrent_url(
                swap_jackett_host(result['Link'], nefarious_settings))
        except Exception as e:
            logging.info('Exception tracing torrent url: {}'.format(e))
            continue
        # add torrent to valid search results
        logging.info('Valid Match: {} with {} Seeders'.format(result['Title'], result['Seeders']))
        populated_results.append(result)
    return populated_results
Example 8: _phrase_to_regex
# Required module: import regex [as an alias]
# Or: from regex import search [as an alias]
def _phrase_to_regex(phrase):
    # Treat whitespace between words as meaning anything other than alphanumeric
    # characters.
    pattern = r"[^\w--_]+".join(regex.escape(word) for word in phrase.split())
    # Treat spaces at the beginning or end of the phrase as matching any
    # whitespace character. This makes it easy to select stuff like non-breaking
    # space, which occurs frequently in browsers.
    # TODO Support newlines. Note that these are frequently implemented as
    # separate text nodes in the accessibility tree, so the obvious
    # implementation would not work well.
    if phrase == " ":
        pattern = r"\s"
    else:
        if phrase.startswith(" "):
            pattern = r"\s" + pattern
        if phrase.endswith(" "):
            pattern = pattern + r"\s"
    # Only match at boundaries of alphanumeric sequences if the phrase ends
    # are alphanumeric.
    if regex.search(r"^[\w--_]", phrase, regex.VERSION1 | regex.UNICODE):
        pattern = r"(?<![\w--_])" + pattern
    if regex.search(r"[\w--_]$", phrase, regex.VERSION1 | regex.UNICODE):
        pattern = pattern + r"(?![\w--_])"
    return pattern
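For a feel of the output, a hedged example; the phrase and haystack are made up, and the [\w--_] set-difference syntax requires the third-party regex module in VERSION1 mode:

import regex

pattern = _phrase_to_regex("key value")
print(pattern)  # (?<![\w--_])key[^\w--_]+value(?![\w--_])

m = regex.search(pattern, "set key\u00a0value now", regex.VERSION1 | regex.UNICODE)
print(repr(m.group(0)) if m else None)  # 'key\xa0value' -- matched across a non-breaking space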
Example 9: _ibis_sqlite_regex_extract
# Required module: import regex [as an alias]
# Or: from regex import search [as an alias]
def _ibis_sqlite_regex_extract(string, pattern, index):
    """Extract match of regular expression `pattern` from `string` at `index`.

    Parameters
    ----------
    string : str
    pattern : str
    index : int

    Returns
    -------
    result : str or None
    """
    result = re.search(pattern, string)
    if result is not None and 0 <= index <= (result.lastindex or -1):
        return result.group(index)
    return None
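A quick usage check, assuming regex has been imported as re as in the header comment; the inputs are made up:

import regex as re

print(_ibis_sqlite_regex_extract("version 1.24.0", r"(\d+)\.(\d+)", 2))  # '24'
print(_ibis_sqlite_regex_extract("no digits here", r"(\d+)", 1))         # None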
Example 10: _mark_quoted_email_splitlines
# Required module: import regex [as an alias]
# Or: from regex import search [as an alias]
def _mark_quoted_email_splitlines(markers, lines):
    """
    When there are headers indented with '>' characters, this method will
    attempt to identify if the header is a splitline header. If it is, then we
    mark it with 's' instead of leaving it as 'm' and return the new markers.
    """
    # Create a list of markers to easily alter specific characters
    markerlist = list(markers)
    for i, line in enumerate(lines):
        if markerlist[i] != 'm':
            continue
        for pattern in SPLITTER_PATTERNS:
            matcher = re.search(pattern, line)
            if matcher:
                markerlist[i] = 's'
                break
    return "".join(markerlist)
Example 11: get_search_configs
# Required module: import regex [as an alias]
# Or: from regex import search [as an alias]
def get_search_configs():
    """
    Defines the configurations for hyper parameter search
    """
    configurations.DEFINE_string("template", None, "Template file for hyper-param search")
    configurations.DEFINE_string("search_algorithm", "genetic", "Algorithm for hyper-param optimization. Select from 'genetic', 'grid_search'")
    configurations.DEFINE_integer("generations", 100, "Number of generations for genetic algorithm")
    configurations.DEFINE_integer("pop_size", 20, "Population size for genetic algorithm")
    configurations.DEFINE_integer("num_survivors", 10, "Number of survivors for genetic algorithm")
    configurations.DEFINE_integer("num_threads", 4, "Number of parallel threads (number of parallel executions)")
    configurations.DEFINE_integer("num_gpu", 1, "Number of GPUs on the machine. Use 0 if there are none")
    configurations.DEFINE_integer("sleep_time", 1, "Sleep time")
    configurations.DEFINE_float("mutate_rate", 0.2, "Mutation rate for genetic algorithm")
    configurations.DEFINE_string("init_pop", None, "Specify starting population. Path to the pickle file")
    c = configurations.ConfigValues()
    return c
Example 12: get_search_configs
# Required module: import regex [as an alias]
# Or: from regex import search [as an alias]
def get_search_configs():
    """
    Defines the configurations for hyper parameter search
    """
    configurations.DEFINE_string("template", None, "Template file for hyper-param search")
    configurations.DEFINE_string("search_algorithm", "genetic", "Algorithm for hyper-param optimization. Select from 'genetic', 'grid_search'")
    configurations.DEFINE_integer("generations", 100, "Number of generations for genetic algorithm")
    configurations.DEFINE_integer("pop_size", 20, "Population size for genetic algorithm")
    configurations.DEFINE_integer("num_survivors", 10, "Number of survivors for genetic algorithm")
    configurations.DEFINE_integer("num_threads", 4, "Number of parallel threads (number of parallel executions)")
    configurations.DEFINE_integer("num_gpu", 1, "Number of GPUs on the machine. Use 0 if there are none")
    configurations.DEFINE_integer("sleep_time", 1, "Sleep time")
    configurations.DEFINE_float("mutate_rate", 0.02, "Mutation rate for genetic algorithm")
    configurations.DEFINE_string("init_pop", None, "Specify starting population. Path to the pickle file")
    c = configurations.ConfigValues()
    return c
Example 13: generate_results
# Required module: import regex [as an alias]
# Or: from regex import search [as an alias]
def generate_results(pop, gen):
    result = list()
    for i in range(len(pop)):
        filename = output_filename(gen, i)
        print("Reading file " + filename)
        with open(filename) as f:
            content = f.readlines()
        content = [x.strip() for x in content]
        # remove lines without an error value (keep only lines containing 'MSE')
        content = [s for s in content if re.search('MSE', s)]
        errors = [float(s.split()[_VALID_ERR_IDX]) for s in content]
        if len(errors) > 0:
            errors.sort()
            result.append(errors[0])
        else:
            result.append(float('inf'))
        if str(result[-1]) == 'nan':  # NaN check; a float never compares equal to the string 'nan'
            result[-1] = float('inf')
    print("-" * 80)
    print(result)
    assert len(pop) == len(result)
    return result
Example 14: validate
# Required module: import regex [as an alias]
# Or: from regex import search [as an alias]
def validate(self, document):
    text = document.text.split()
    if re.search(r"^(!|#|\?)", document.text):
        pass
    elif len(text) > 1:
        if not text[-2].startswith("--"):
            if (
                not re.search(r"\"|'", text[-1])
                and not text[-1].startswith("--")
                and text[-1] not in list(get_options().keys())
            ):
                raise ValidationError(
                    cursor_position=1,
                    message="{text} is not a valid Chepy method".format(
                        text=text[-1]
                    ),
                )
Example 15: count_occurances
# Required module: import regex [as an alias]
# Or: from regex import search [as an alias]
def count_occurances(self, regex: str, case_sensitive: bool = False):
    """Counts occurrences of the regex.

    Counts the number of times the provided string occurs.

    Args:
        regex (str): Required. Regex string to search for
        case_sensitive (bool, optional): Whether the search should be case sensitive. Defaults to False.

    Returns:
        Chepy: The Chepy object.

    Examples:
        >>> Chepy("AABCDADJAKDJHKSDAJSDdaskjdhaskdjhasdkja").count_occurances("ja").output
        2
    """
    if case_sensitive:
        r = re.compile(regex)
    else:
        r = re.compile(regex, re.IGNORECASE)
    self.state = len(r.findall(self._convert_to_str()))
    return self