本文整理汇总了Python中re.match函数的典型用法代码示例。如果您正苦于以下问题:Python match函数的具体用法?Python match怎么用?Python match使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了match函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: process_line
def process_line(line, extra_tags):
    """Normalise one dictionary line and append extra_tags.

    Lines already carrying tags pass through (possibly with the base word
    duplicated as its own lemma); unrecognised lines are expanded so every
    word becomes "<word> <base> unknown<extra_tags>".
    """
    # NOTE(review): indentation was lost in the scraped listing; the nesting
    # below (the early return inside "else", and the "if '|'" block nested
    # under the extra_tags check) is a reconstruction -- confirm against the
    # original source.
    line = re.sub(' *#.*$', '', line) # remove comments
    line = re.sub('-$', '', line)  # drop a trailing hyphen
    # Cyrillic word carrying a "/" flag, or a line with no space: keep as-is.
    if not ' ' in line or re.match('.*[а-яіїєґ]/.*', line):
        out_line = line
    # "<word> <lemma> <tags>" where tags begin lowercase: already complete.
    elif re.match('^[^ ]+ [^ ]+ [^:]?[a-z].*$', line):
        out_line = line
    # "<word> <tags>" with no lemma: duplicate the word as its own lemma.
    elif re.match('^[^ ]+ [:^<a-z0-9_].*$', line):
        out_line = re.sub('^([^ ]+) ([^<a-z].*)$', '\\1 \\1 \\2', line)
    else:
        # Unrecognised shape: report it, then emit one "<word> <base> unknown"
        # line per word, using the first word as the base form.
        print('hit-', line, file=sys.stderr)
        base = re.findall('^[^ ]+', line)[0]
        out_line = re.sub('([^ ]+) ?', '\\1 ' + base + ' unknown' + extra_tags + '\n', line)
        return out_line[:-1]  # strip the trailing newline added by the sub
    # if extra_tags != '' and not re.match('.* [a-z].*$', out_line):
    if extra_tags != '' and (not ' ' in out_line or ' ^' in out_line):
        extra_tags = ' ' + extra_tags
        if '|' in out_line:
            # propagate the extra tags into every "|"-separated alternative
            out_line = out_line.replace('|', extra_tags + '|')
    # if not "/" in out_line and not re.match("^[^ ]+ [^ ]+ [^ ]+$", out_line + extra_tags):
    #     print("bad line:", out_line + extra_tags, file=sys.stderr)
    # if len(out_line)> 100:
    #     print(out_line, file=sys.stderr)
    #     sys.exit(1)
    return out_line + extra_tags
示例2: __init__
def __init__(self, host, debugfunc=None):
    """Initialise a server entry from a connection string.

    host is either a (connection_string, weight) tuple or one of:
      * "unix:/path/to/socket"
      * "inet:hostname[:port]"
      * "hostname:port"
    Raises ValueError when the string matches none of these forms.
    """
    # A (host, weight) pair overrides the default weight of 1.
    # (was types.TupleType -- Python 2 only; identical to the tuple builtin)
    if isinstance(host, tuple):
        host, self.weight = host
    else:
        self.weight = 1

    # parse the connection string
    m = re.match(r'^(?P<proto>unix):(?P<path>.*)$', host)
    if not m:
        m = re.match(r'^(?P<proto>inet):'
                     r'(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
    if not m:
        m = re.match(r'^(?P<host>[^:]+):(?P<port>[0-9]+)$', host)
    if not m:
        raise ValueError('Unable to parse connection string: "%s"' % host)

    hostData = m.groupdict()
    if hostData.get('proto') == 'unix':
        self.family = socket.AF_UNIX
        self.address = hostData['path']
    else:
        self.family = socket.AF_INET
        self.ip = hostData['host']
        # groupdict() maps an unmatched optional group to None -- the key is
        # present, so .get()'s default alone never applies and int(None)
        # used to raise TypeError for "inet:host" without a port.
        self.port = int(hostData.get('port') or 11211)
        self.address = ( self.ip, self.port )

    if not debugfunc:
        debugfunc = lambda x: x  # no-op logger by default
    self.debuglog = debugfunc

    self.deaduntil = 0
    self.socket = None
    self.buffer = ''
示例3: importAuto
def importAuto(cls, string, path=None, activeFit=None, callback=None, encoding=None):
    """Detect the format of a fitting string and dispatch to the matching
    importer, returning a (format_name, fits) tuple."""
    # Strip surrounding whitespace from the first line to avoid possible
    # detection errors.
    firstLine = re.split("[\n\r]+", string.strip(), maxsplit=1)[0].strip()

    # An XML-style opening tag means the XML importer handles it.
    if re.match("<", firstLine):
        if encoding:
            return "XML", cls.importXml(string, callback, encoding)
        return "XML", cls.importXml(string, callback)

    # A JSON-style opening brace means CREST/JSON.
    if firstLine[0] == '{':
        return "JSON", (cls.importCrest(string),)

    # "[setup name]" together with a source file name (which carries the
    # ship name) means an EFT config file.
    if re.match("\[.*\]", firstLine) and path is not None:
        shipName = os.path.split(path)[1].rsplit('.')[0]
        return "EFT Config", cls.importEftCfg(shipName, string, callback)

    # "[ship, setup name]" (comma between the brackets) means EFT export.
    if re.match("\[.*,.*\]", firstLine):
        return "EFT", (cls.importEft(string),)

    # Everything else falls through to the DNA importer.
    return "DNA", (cls.importDna(string),)
示例4: _apache_index
def _apache_index(self, url):
    """Fetch an Apache autoindex (directory listing) page and return the
    requests response object with two added attributes: .dirs and .files,
    each a list of [href, trailing-date/size-text] pairs.

    Raises ValueError when the server does not answer with status 200.
    """
    r = requests.get(url)
    if r.status_code != 200:
        raise ValueError(url+" status:"+str(r.status_code))
    r.dirs = []
    r.files = []
    # NOTE(review): r.content is bytes on Python 3, so splitting it with a
    # str separator implies Python 2 (or should be r.text) -- confirm the
    # intended interpreter.
    for l in r.content.split("\n"):
        # Typical rows being parsed:
        # '<img src="/icons/folder.png" alt="[DIR]" /> <a href="7.0/">7.0/</a>                    03-Dec-2014 19:57    -   '
        # ''<img src="/icons/tgz.png" alt="[ ]" /> <a href="owncloud_7.0.4-2.diff.gz">owncloud_7.0.4-2.diff.gz</a>           09-Dec-2014 16:53  9.7K <a href="owncloud_7.0.4-2.diff.gz.mirrorlist">Details</a>'
        #
        m = re.search("<a\s+href=[\"']?([^>]+?)[\"']?>([^<]+?)[\"']?</a>\s*([^<]*)", l, re.I)
        if m:
            # groups: (href, link text, trailing text), e.g.
            # ('owncloud_7.0.4-2.diff.gz', 'owncloud_7.0.4-2.diff.gz', '09-Dec-2014 16:53  9.7K ')
            m1,m2,m3 = m.groups()
            if re.match("(/|\?|\w+://)", m1): # skip absolute urls, query strings and foreign urls
                continue
            if re.match("\.?\./?$", m1): # skip . and ..
                continue
            m3 = re.sub("[\s-]+$", "", m3)  # trim trailing whitespace / "-" size placeholder
            # hrefs ending in "/" are subdirectories; everything else is a file
            if re.search("/$", m1):
                r.dirs.append([m1, m3])
            else:
                r.files.append([m1, m3])
    return r
示例5: area_code_lookup
def area_code_lookup(request, area_id, format):
    """Look up an area by an ONS or GSS code and redirect to its canonical
    /area/<id> URL, preserving any query string.

    Returns None when area_id does not look like a recognised code;
    get_object_or_404 raises Http404 when no matching Area exists.
    """
    from mapit.models import Area, CodeType
    area_code = None
    # Old-style ONS codes: two digits plus one of several letter/digit suffixes.
    if re.match('\d\d([A-Z]{2}|[A-Z]{4}|[A-Z]{2}\d\d\d|[A-Z]|[A-Z]\d\d)$', area_id):
        area_code = CodeType.objects.get(code='ons')
    elif re.match('[EW]0[12]\d{6}$', area_id): # LSOA/MSOA have ONS code type
        area_code = CodeType.objects.get(code='ons')
    # New-style GSS codes: country letter plus eight digits.
    elif re.match('[ENSW]\d{8}$', area_id):
        area_code = CodeType.objects.get(code='gss')
    if not area_code:
        return None
    args = { 'format': format, 'codes__type': area_code, 'codes__code': area_id }
    # E01/W01 and E02/W02 get an explicit area type
    # (presumably generalised LSOA/MSOA -- confirm against the type table).
    if re.match('[EW]01', area_id):
        args['type__code'] = 'OLF'
    elif re.match('[EW]02', area_id):
        args['type__code'] = 'OMF'
    area = get_object_or_404(Area, **args)
    path = '/area/%d%s' % (area.id, '.%s' % format if format else '')
    # If there was a query string, make sure it's passed on in the
    # redirect:
    if request.META['QUERY_STRING']:
        path += "?" + request.META['QUERY_STRING']
    return HttpResponseRedirect(path)
示例6: checkInCNAME
def checkInCNAME(node_text, nodes):
    """Resolve the IN CNAME target of a zone-file node.

    Returns the target when it is an IP address, a "subdomain found (...)"
    message when it looks like a subdomain, the result of recursively chasing
    a CNAME-of-CNAME, a coloured "unknown host (...)" message when the chain
    cannot be resolved, or None when the node has no IN CNAME record.
    """
    try:
        InCNAME = re.search("IN CNAME (.*)", node_text)
        # .group(0) is the whole match; splitting on the prefix leaves the target
        alias = InCNAME.group(0).split("IN CNAME ")[1]
        #IP address found (starts with digits and a dot)
        if re.match("(\d{1,3}\.)", alias):
            return alias
        # cname is a subdomain
        # NOTE(review): "[a-x]" excludes y and z -- looks like a typo for
        # [a-z]; left as-is to preserve behaviour, confirm upstream.
        elif re.match(".*[a-x]\.", alias):
            return ("subdomain found (" + alias + ")")
        #cname is another cname
        else:
            try:
                alias_name = dns.name.Name([alias])
                alias_IP = nodes[alias_name].to_text(alias_name)
                checkCname = checkInA(alias_IP)
                if checkCname is None:
                    # no A record text; recurse on the next CNAME hop
                    return checkInCNAME(alias_IP, nodes)
                else:
                    return checkCname
            # NOTE(review): bare except hides real errors (KeyError, dns
            # exceptions); consider narrowing.
            except:
                return (Fore.RED + "unknown host (" + alias + ")" + Fore.RESET)
    # node has no IN CNAME: re.search returned None and .group raised
    except:
        return None
示例7: parse_requirements
def parse_requirements(requirements_file='requirements.txt'):
    """Parse a pip requirements file into a distutils-friendly list.

    Editable (-e) and URL lines are reduced to their #egg= name; index
    (-f) and include (-r) lines are dropped, as is "argparse" on
    Python >= 2.7 (it is in the standard library there and listing it
    breaks distro installs). All returned entries are whitespace-stripped.
    """
    requirements = []
    with open(requirements_file, 'r') as f:
        for line in f:
            # Strip once up front: the original compared the raw line
            # (still carrying its trailing newline) to 'argparse', so that
            # branch could never fire, and the #egg= names below were
            # appended with the newline attached.
            line = line.strip()
            # For the requirements list, we need to inject only the portion
            # after egg= so that distutils knows the package it's looking for
            # such as:
            # -e git://github.com/openstack/nova/master#egg=nova
            if re.match(r'\s*-e\s+', line):
                requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
                                    line))
            # such as:
            # http://github.com/openstack/nova/zipball/master#egg=nova
            elif re.match(r'\s*https?:', line):
                requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
                                    line))
            # -f lines are for index locations, and don't get used here
            elif re.match(r'\s*-f\s+', line):
                pass
            # -r lines are for including other files, and don't get used here
            elif re.match(r'\s*-r\s+', line):
                pass
            # argparse is part of the standard library starting with 2.7
            # adding it to the requirements list screws distro installs
            elif line == 'argparse' and sys.version_info >= (2, 7):
                pass
            else:
                requirements.append(line)
    return requirements
示例8: parse_report
def parse_report(path):
    """ Return the volume informations contained in the SIENAX report. This
        is a dictionary with keys "grey", "white", and "brain".
        The informations for the different tissues is a dictionary with the
        normalized and raw values, in cubic millimeters. A "vscale" key holds
        the volumetric scaling factor when a VSCALING line is present.
        adapted from: http://code.google.com/p/medipy/source/browse/plugins/fsl/sienax.py
        see licence: http://code.google.com/p/medipy/source/browse/LICENSE
    """
    report = {}
    # "with" guarantees the file handle is closed (the original leaked it).
    with open(path) as fd:
        for line in fd:
            for tissue in ["GREY", "WHITE", "BRAIN"]:
                pattern = tissue + r"\s+([\d+\.]+)\s+([\d+\.]+)"
                measure = re.match(pattern, line)
                if measure:
                    normalized = float(measure.group(1))
                    raw = float(measure.group(2))
                    report[tissue.lower()] = {"normalized": normalized, "raw": raw}
                    # a line names at most one tissue, so stop scanning
                    # (the original "continue" just tried the remaining
                    # tissues against the same line, with the same result)
                    break
            vscale = re.match(r"VSCALING ([\d\.]+)", line)
            if vscale:
                report["vscale"] = float(vscale.group(1))
    return report
示例9: check_api_version_decorator
def check_api_version_decorator(logical_line, previous_logical, blank_before,
                                filename):
    """Hacking check: flag an @api_version decorator that is not the first
    decorator on a method (another decorator sits directly above it)."""
    message = ("N332: the api_version decorator must be the first decorator"
               " on a method.")
    # A blank line above means this decorator group starts fresh.
    if blank_before != 0:
        return
    # Only interested in @api_version lines themselves.
    if re.match(api_version_re, logical_line) is None:
        return
    # ...preceded directly by another decorator: report at column 0.
    if re.match(decorator_re, previous_logical):
        yield (0, message)
示例10: parse_template
def parse_template(template_name):
    """Given a template name, attempt to extract its group name and upload date

    Returns:
        * TemplateInfo('unknown', None, False) if no matcher recognised the name
        * TemplateInfo(group_name, datestamp, is_stream) for the first matching
          matcher. group name will be a string, datestamp will be a
          :py:class:`datetime.date <python:datetime.date>`, or None if a date
          can't be derived from the template name
    """
    for group_name, regex in stream_matchers:
        matches = re.match(regex, template_name)
        if matches:
            groups = matches.groupdict()
            # hilarity may ensue if this code is run right before the new year
            today = date.today()
            # groupdict() maps an unmatched optional group to None (the key
            # IS present), so .get()'s default alone would leave int(None)
            # to raise TypeError here -- hence the "or" fallback.
            year = int(groups.get('year') or today.year)
            month, day = int(groups['month']), int(groups['day'])
            # validate the template date by turning into a date obj
            template_date = futurecheck(date(year, month, day))
            return TemplateInfo(group_name, template_date, True)
    for group_name, regex in generic_matchers:
        matches = re.match(regex, template_name)
        if matches:
            return TemplateInfo(group_name, None, False)
    # If no match, unknown
    return TemplateInfo('unknown', None, False)
示例11: parse
def parse(fh):
    """Parse "TRANSLATION <group>: N translated message(s) ..." lines from
    fh into a list of per-group stat dicts, each with the counts for every
    entry of the module-level stat_types plus a computed 'total'.

    Lines for the group 'total' and malformed lines are skipped (the latter
    with a warning on stderr).
    """
    stats = []
    for line in fh:
        m = re.match(r'TRANSLATION\s+(?P<content>.*)\n', line)
        if not m:
            continue
        line = m.group('content')
        # NOTE(review): the group-name character class was mangled by the
        # page scraper's email obfuscation ("[email protected]");
        # [\w@.-]+ is a best-effort reconstruction -- confirm against the
        # original source.
        m = re.match(r'(?P<group>[\w@.-]+):', line)
        if not m:
            sys.stderr.write('Malformed TRANSLATION line: %s\n' % line)
            continue
        stat = {'group': m.group('group')}
        if stat['group'] == 'total':
            continue
        # renamed from "sum", which shadowed the builtin
        total = 0
        for stat_type in stat_types:
            m = re.search(r'\b(?P<count>\d+) %s (message|translation)' % stat_type,
                          line)
            if m:
                stat[stat_type] = int(m.group('count'))
                total += stat[stat_type]
        stat['total'] = total
        stats.append(stat)
    return stats
示例12: readFile
def readFile(fileV4, fileV6, trie):
    """Read IPv4 and IPv6 route files of the form "<asn>,<prefix>/<len>..."
    and insert every parsed prefix into trie via insertTrie.

    Returns the (mutated) trie.
    """
    # IPv4 file: group(1)=ASN, group(2)=dotted address, group(3)=prefix length.
    # "with" closes the handles (the original leaked both), and "infile"
    # avoids shadowing the input() builtin.
    pattern = '(\d+)\,(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})/(\d{1,2}).*'
    with open(fileV4, "r") as infile:
        for line in infile:
            result = re.match(pattern, line)
            if result:
                address = result.group(2)
                length = result.group(3)
                asn = result.group(1)
                update = True
                withdrawal = False
                count = 0
                insertTrie(trie, address, length, asn, update, withdrawal, count)
    # IPv6 file: the alternation covers every textual IPv6 form.
    pattern = '(\d+)\,(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]).){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]).){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))/(\d{1,3}),.*'
    with open(fileV6, "r") as infile:
        for line in infile:
            result = re.match(pattern, line)
            if result:
                address = result.group(2)
                # group 32 is the prefix length -- numbered group after the
                # big address alternation; presumably correct, verify if the
                # pattern is ever edited.
                length = result.group(32)
                asn = result.group(1)
                update = True
                withdrawal = False
                count = 0
                insertTrie(trie, address, length, asn, update, withdrawal, count)
    return trie
示例13: history
def history(self, page):
    """Return the git commit history of page.abspath as a list of dicts with
    commit/author/date fields, plus insertion/deletion counts and their
    percentages relative to the largest change in the history."""
    GIT_COMMIT_FIELDS = ["commit", "author", "date", "date_relative", "message"]
    # %x1f (unit separator) between fields, %x1e (record separator) per commit
    GIT_LOG_FORMAT = "%x1f".join(["%h", "%an", "%ad", "%ar", "%s"]) + "%x1e"
    output = git.log("--format=%s" % GIT_LOG_FORMAT, "--follow", "-z", "--shortstat", page.abspath)
    output = output.split("\n")
    history = []
    for line in output:
        if "\x1f" in line:
            # commit record: strip record/NUL separators, split into fields
            log = line.strip("\x1e\x00").split("\x1f")
            history.append(dict(zip(GIT_COMMIT_FIELDS, log)))
        else:
            # --shortstat line belonging to the commit appended just above
            # NOTE(review): assumes a commit record always precedes this
            # line; an unexpected leading line would raise IndexError.
            insertion = re.match(r".* (\d+) insertion", line)
            deletion = re.match(r".* (\d+) deletion", line)
            history[-1]["insertion"] = int(insertion.group(1)) if insertion else 0
            history[-1]["deletion"] = int(deletion.group(1)) if deletion else 0
    # NOTE(review): max() raises ValueError on an empty history; the
    # "or 1.0" only guards a zero maximum, not an empty list.
    max_changes = float(max([(v["insertion"] + v["deletion"]) for v in history])) or 1.0
    for v in history:
        v.update(
            {
                "insertion_relative": str((v["insertion"] / max_changes) * 100),
                "deletion_relative": str((v["deletion"] / max_changes) * 100),
            }
        )
    return history
示例14: filter_services
def filter_services(svcs):
    """Filter svcs (dicts with a "tags" list) by the module-level _args
    options: --has (exact tag) / --match (regex) include services,
    --has-not / --no-match then exclude from the result. With no include
    option given, every service passes the include stage.
    """
    filtered = []
    # inclusion filters
    if _args['--has']:
        for svc in svcs:
            for inc in _args['--has']:
                if inc in svc["tags"] and svc not in filtered:
                    filtered.append(svc)
    if _args['--match']:
        for svc in svcs:
            for regex in _args['--match']:
                for tag in svc["tags"]:
                    if re.match(regex, tag) and svc not in filtered:
                        filtered.append(svc)
    if not filtered and not _args['--has'] and not _args['--match']:
        filtered = svcs
    # exclusion filters
    if _args['--has-not']:
        # operate on a copy, otherwise .remove would change the list under our feet
        for svc in list(filtered):
            for exc in _args['--has-not']:
                if exc in svc["tags"]:
                    filtered.remove(svc)
                    # stop after the first hit: the original kept scanning and
                    # called .remove again (ValueError) when a service carried
                    # two excluded tags
                    break
    if _args['--no-match']:
        for svc in list(filtered):
            removed = False
            for regex in _args['--no-match']:
                for tag in svc["tags"]:
                    if re.match(regex, tag):
                        # the original re-checked "sv in list(filtered)" before
                        # every remove, copying the list each time; removing
                        # once and breaking out is equivalent and O(1) extra
                        filtered.remove(svc)
                        removed = True
                        break
                if removed:
                    break
    return filtered
示例15: main
def main():
    """Translate each compile line of makefile2wrappers.txt into a wrapper
    .c file under SourceWrappers/ that #defines the line's -D flags and
    #includes the original source file. Returns 0 on success."""
    # "with" closes the handles (the original left every file open), and
    # the trailing semicolons were dropped throughout.
    with open("makefile2wrappers.txt", "r") as f:
        lins = f.readlines()

    # ensure the output directory exists before writing wrappers into it
    os.makedirs("SourceWrappers", exist_ok=True)

    for l in lins:
        l = l.strip()
        if len(l) == 0:
            continue
        print('Line: '+l)
        # e.g.: $(C) -DDINT -c ../Source/umf_analyze.c -o umf_i_analyze.o
        # everything between the "$(C)" prefix and "-c" is the -D flags
        defs = re.match(".*\)(.*)-c", l).group(1).strip()
        # An explicit "-o" names the output; wrap it as SourceWrappers/<out>.c
        # with the #defines first. (The original comment claimed the opposite.)
        if re.search('.*-o.*', l) is not None:
            src = re.match(".*-c(.*)-o", l).group(1).strip()
            out = re.match(".*-o(.*)", l).group(1).strip()
            f = 'SourceWrappers/'+out+".c"
            print(' => Creating '+f+'\n')
            with open(f, "w") as o:
                DEFs = [x for x in defs.split("-D") if x]  # drop empty splits
                for d in DEFs:
                    o.write('#define '+d+'\n')
                o.write('#include <'+src+'>'+'\n')
        else:
            # No "-o": derive the wrapper name from the source file itself.
            src = re.match(".*-c(.*)", l).group(1).strip()
            f = "SourceWrappers/"+os.path.basename(src)
            print(' => Creating '+f+'\n')
            with open(f, "w") as o:
                o.write('#include <'+src+'>'+'\n')
    return 0