This article collects typical usage examples of the string.whitespace constant in Python. If you are wondering what string.whitespace does, how to use it, or what it looks like in real code, the curated examples below may help. You can also read further about the string module it comes from.
The following 15 code examples of string.whitespace are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: _reject_surrounding_whitespace
# Required import: import string [as alias]
# Or: from string import whitespace [as alias]
def _reject_surrounding_whitespace(headers, hdr_validation_flags):
    """
    Raises a ProtocolError if any header name or value is surrounded by
    whitespace characters.
    """
    # For compatibility with RFC 7230 header fields, we need to allow the field
    # value to be an empty string. This is ludicrous, but technically allowed.
    # The field name may not be empty, though, so we can safely assume that it
    # must have at least one character in it and throw exceptions if it
    # doesn't.
    for header in headers:
        if header[0][0] in _WHITESPACE or header[0][-1] in _WHITESPACE:
            raise ProtocolError(
                "Received header name surrounded by whitespace %r" % header[0])
        if header[1] and ((header[1][0] in _WHITESPACE) or
                          (header[1][-1] in _WHITESPACE)):
            raise ProtocolError(
                "Received header value surrounded by whitespace %r" % header[1]
            )
        yield header
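A minimal usage sketch follows; the module-level _WHITESPACE set and the ProtocolError class are not shown in the snippet, so the stand-ins below are assumptions:

import string

_WHITESPACE = frozenset(string.whitespace)   # assumed definition, stand-in only

class ProtocolError(Exception):              # assumed definition, stand-in only
    pass

headers = [("content-length", " 42")]        # value padded with a leading space
try:
    # The validator is a generator, so force it with list() to trigger the checks.
    list(_reject_surrounding_whitespace(headers, hdr_validation_flags=None))
except ProtocolError as exc:
    print(exc)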
Example 2: _NextTok
# Required import: import string [as alias]
# Or: from string import whitespace [as alias]
def _NextTok(str, pos):
    # Returns (token, endPos)
    end = len(str)
    if pos >= end: return None, 0
    while pos < end and str[pos] in string.whitespace:
        pos = pos + 1
    # Guard: the remainder of the string was all whitespace.
    if pos >= end: return None, 0
    # Special case for +-
    if str[pos] in '+-':
        return str[pos], pos + 1
    # Digits also a special case.
    endPos = pos
    while endPos < end and str[endPos] in string.digits + ".":
        endPos = endPos + 1
    if pos != endPos: return str[pos:endPos], endPos
    endPos = pos
    while endPos < end and str[endPos] not in string.whitespace + string.digits + "+-":
        endPos = endPos + 1
    if pos != endPos: return str[pos:endPos], endPos
    return None, 0
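For illustration, walking a small expression string with the tokenizer above (assuming string has been imported as in the snippet):

s = "12.5 + seconds"
pos = 0
while True:
    tok, pos = _NextTok(s, pos)
    if tok is None:
        break
    print(tok)   # prints 12.5, then +, then seconds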
Example 3: originalTextFor
# Required import: import string [as alias]
# Or: from string import whitespace [as alias]
def originalTextFor(expr, asString=True):
    """Helper to return the original, untokenized text for a given expression. Useful to
       restore the parsed fields of an HTML start tag into the raw tag text itself, or to
       revert separate tokens with intervening whitespace back to the original matching
       input text. By default, returns a string containing the original parsed text.
       If the optional C{asString} argument is passed as C{False}, then the return value is a
       C{L{ParseResults}} containing any results names that were originally matched, and a
       single token containing the original matched text from the input string. So if
       the expression passed to C{L{originalTextFor}} contains expressions with defined
       results names, you must set C{asString} to C{False} if you want to preserve those
       results name values."""
    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
    endlocMarker = locMarker.copy()
    endlocMarker.callPreparse = False
    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
    if asString:
        extractText = lambda s,l,t: s[t._original_start:t._original_end]
    else:
        def extractText(s,l,t):
            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
    matchExpr.setParseAction(extractText)
    return matchExpr
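A hedged usage sketch with pyparsing's HTML tag helpers (standard pyparsing API, not part of the snippet above):

from pyparsing import makeHTMLTags, SkipTo, originalTextFor

src = "some <b>bold <i>text</i></b> here"
b_start, b_end = makeHTMLTags("b")
# Recover the raw source text spanned by the match, tags and all.
bold = originalTextFor(b_start + SkipTo(b_end) + b_end)
print(bold.searchString(src)[0][0])   # <b>bold <i>text</i></b>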
Example 4: _strip_surrounding_whitespace
# Required import: import string [as alias]
# Or: from string import whitespace [as alias]
def _strip_surrounding_whitespace(headers, hdr_validation_flags):
    """
    Given an iterable of header two-tuples, strip both leading and trailing
    whitespace from both header names and header values. This generator
    produces tuples that preserve the original type of the header tuple for
    tuple and any ``HeaderTuple``.
    """
    for header in headers:
        if isinstance(header, HeaderTuple):
            yield header.__class__(header[0].strip(), header[1].strip())
        else:
            yield (header[0].strip(), header[1].strip())
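A minimal sketch of the round trip; HeaderTuple normally comes from hpack, so a namedtuple stands in for it here to keep the example self-contained:

from collections import namedtuple

HeaderTuple = namedtuple("HeaderTuple", ["name", "value"])   # stand-in only

headers = [HeaderTuple(" accept ", " text/html "), ("host", "example.com ")]
print(list(_strip_surrounding_whitespace(headers, hdr_validation_flags=None)))
# [HeaderTuple(name='accept', value='text/html'), ('host', 'example.com')]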
Example 5: generic_tokenize
# Required import: import string [as alias]
# Or: from string import whitespace [as alias]
def generic_tokenize(characters):
    """Default tokenizer
    >>> list(generic_tokenize('7/17/18 3:00 p.m.'))
    ['7/17/18', '3:00', 'p.m.']
    >>> list(generic_tokenize('July 17, 2018 at 3p.m.'))
    ['July', '17', ',', '2018', 'at', '3', 'p.m.']
    >>> list(generic_tokenize('July 17, 2018 3 p.m.'))
    ['July', '17', ',', '2018', '3', 'p.m.']
    >>> list(generic_tokenize('3PM on July 17'))
    ['3', 'PM', 'on', 'July', '17']
    >>> list(generic_tokenize('tomorrow noon,Wed 3 p.m.,Fri 11 AM'))
    ['tomorrow', 'noon', ',', 'Wed', '3', 'p.m.', ',', 'Fri', '11', 'AM']
    """
    token = ''
    punctuation = ''
    last_type = None
    for character in characters:
        type = get_character_type(character)
        is_different_type = None not in (type, last_type) and type != last_type \
            and 'punctuation' not in (type, last_type)
        is_skip_character = character in string.whitespace
        is_break_character = character in ','
        if is_skip_character or is_different_type or is_break_character:
            if token:
                yield token
                token = ''
            token = character if not is_skip_character else ''
            if is_break_character:
                yield token
                token = ''
            last_type = type
            continue
        token += character
        last_type = type
    yield token
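The tokenizer depends on a get_character_type helper that is not shown above. A plausible stand-in (an assumption, not the library's actual implementation) that is consistent with the doctests:

import string

def get_character_type(character):
    # Hypothetical helper: classify a character so the tokenizer can detect type changes.
    if character.isalpha():
        return 'alpha'
    if character.isdigit():
        return 'numeric'
    if character in string.punctuation:
        return 'punctuation'
    return None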
Example 6: str2list
# Required import: import string [as alias]
# Or: from string import whitespace [as alias]
def str2list(rawstr):
    rawlist = rawstr.strip(string.whitespace).split(' ')
    # Remove space elements in list.
    cleanlist = [x for x in rawlist if x != ' ' and x != '']
    return cleanlist
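A quick check of the behaviour (leading/trailing whitespace stripped, repeated spaces collapsed):

print(str2list("  1.0  2.0 3.0  "))   # ['1.0', '2.0', '3.0']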
Example 7: ifreq
# Required import: import string [as alias]
# Or: from string import whitespace [as alias]
def ifreq(self):
    """
    Return an iterator over dictionaries of frequency-related data.
    """
    with open(self.filename, "r") as f:
        collecting = False
        for line in f:
            freq = self.freq_regex.match(line)
            title = self.title_regex.match(line)
            empty_line = (line.strip(whitespace) == "")
            if freq:
                freq_data = list(freq.groups())
            # Collect start.
            if title and not collecting:
                collecting = True
                coords, deltas = [], []
            # Collect stop.
            elif empty_line and collecting:
                collecting = False
                freq_data.append(coords)
                freq_data.append(deltas)
                freq_dict = dict(zip(self.freq_info, freq_data))
                yield freq_dict
            # Collect data.
            elif collecting:
                x, y, z, dx, dy, dz = line2list(line)
                coord = (x, y, z)
                delta = (dx, dy, dz)
                coords.append(coord)
                deltas.append(delta)
Example 8: load
# Required import: import string [as alias]
# Or: from string import whitespace [as alias]
def load(self):
    "Overridden load method."
    PosCar.load(self)
    with open(self.filename, 'r') as f:
        for i in range(self.totline):
            f.readline()
        # Get the dimensions of the 3D array.
        grid = f.readline().strip(whitespace)
        empty = not grid  # empty row
        while empty:
            grid = f.readline().strip(whitespace)
            empty = not grid
        x, y, z = line2list(grid, dtype=int)
        # Read electron localization function data.
        elf_data = []
        for line in f:
            datalist = line2list(line)
            elf_data.extend(datalist)
    #########################################
    #                                       #
    #            !!! Notice !!!             #
    # NGX is the length of the **0th** axis #
    # NGY is the length of the **1st** axis #
    # NGZ is the length of the **2nd** axis #
    #                                       #
    #########################################
    # Reshape to a 3D array.
    elf_data = np.array(elf_data).reshape((x, y, z), order='F')
    # Set attributes.
    self.grid = x, y, z
    self.elf_data = elf_data
    return
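The reshape uses Fortran (column-major) order because the values are written with the x index (NGX) varying fastest. A tiny standalone numpy sketch of the same idea, independent of the class above:

import numpy as np

flat = np.arange(8)                        # values as they appear in the file
grid = flat.reshape((2, 2, 2), order='F')  # x varies fastest, as the notice says
print(grid[1, 0, 0])                       # 1: the second value steps along the 0th (x) axis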
Example 9: setDefaultWhitespaceChars
# Required import: import string [as alias]
# Or: from string import whitespace [as alias]
def setDefaultWhitespaceChars( chars ):
    """Overrides the default whitespace chars
    """
    ParserElement.DEFAULT_WHITE_CHARS = chars
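Typical use is to make line breaks significant before any grammar elements are built (standard pyparsing usage, sketched here):

from pyparsing import ParserElement, Word, alphas, OneOrMore, LineEnd

# Only spaces and tabs are skipped now, so a newline must be matched explicitly.
ParserElement.setDefaultWhitespaceChars(" \t")
line = OneOrMore(Word(alphas)) + LineEnd().suppress()
print(line.parseString("alpha beta\n").asList())   # ['alpha', 'beta']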
Example 10: leaveWhitespace
# Required import: import string [as alias]
# Or: from string import whitespace [as alias]
def leaveWhitespace( self ):
    """Disables the skipping of whitespace before matching the characters in the
       C{ParserElement}'s defined pattern. This is normally only used internally by
       the pyparsing module, but may be needed in some whitespace-sensitive grammars.
    """
    self.skipWhitespace = False
    return self
Example 11: setWhitespaceChars
# Required import: import string [as alias]
# Or: from string import whitespace [as alias]
def setWhitespaceChars( self, chars ):
    """Overrides the default whitespace chars
    """
    self.skipWhitespace = True
    self.whiteChars = chars
    self.copyDefaultWhiteChars = False
    return self
Example 12: parseImpl
# Required import: import string [as alias]
# Or: from string import whitespace [as alias]
def parseImpl( self, instring, loc, doActions=True ):
    if loc != 0:
        # see if entire string up to here is just whitespace and ignoreables
        if loc != self.preParse( instring, 0 ):
            raise ParseException(instring, loc, self.errmsg, self)
    return loc, []
Example 13: __init__
# Required import: import string [as alias]
# Or: from string import whitespace [as alias]
def __init__( self, expr, joinString="", adjacent=True ):
    super(Combine,self).__init__( expr )
    # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
    if adjacent:
        self.leaveWhitespace()
    self.adjacent = adjacent
    self.skipWhitespace = True
    self.joinString = joinString
    self.callPreparse = True
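Combine is typically used to glue adjacent matches back into a single token, for example a real-number literal (standard pyparsing usage, sketched):

from pyparsing import Combine, Word, nums, Optional

# Without Combine, the integer part, the dot and the fraction come back as separate tokens.
real = Combine(Word(nums) + Optional("." + Word(nums)))
print(real.parseString("3.14159").asList())   # ['3.14159']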
Example 14: originalTextFor
# Required import: import string [as alias]
# Or: from string import whitespace [as alias]
def originalTextFor(expr, asString=True):
    """Helper to return the original, untokenized text for a given expression. Useful to
       restore the parsed fields of an HTML start tag into the raw tag text itself, or to
       revert separate tokens with intervening whitespace back to the original matching
       input text. Simpler to use than the parse action C{L{keepOriginalText}}, and does not
       require the inspect module to chase up the call stack. By default, returns a
       string containing the original parsed text.
       If the optional C{asString} argument is passed as C{False}, then the return value is a
       C{L{ParseResults}} containing any results names that were originally matched, and a
       single token containing the original matched text from the input string. So if
       the expression passed to C{L{originalTextFor}} contains expressions with defined
       results names, you must set C{asString} to C{False} if you want to preserve those
       results name values."""
    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
    endlocMarker = locMarker.copy()
    endlocMarker.callPreparse = False
    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
    if asString:
        extractText = lambda s,l,t: s[t._original_start:t._original_end]
    else:
        def extractText(s,l,t):
            del t[:]
            t.insert(0, s[t._original_start:t._original_end])
            del t["_original_start"]
            del t["_original_end"]
    matchExpr.setParseAction(extractText)
    return matchExpr
Example 15: _validate_expression_abstract
# Required import: import string [as alias]
# Or: from string import whitespace [as alias]
def _validate_expression_abstract(self, expression, inputs):
    # We are expecting an expression of the form (lambda (<args>) (exp))
    # so remove the whitespace, split by left parens, check the first
    # token is spaces, the second token is "lambda", the third token is
    # the arguments. Note this will fail if (exp) is just exp i.e. 1.
    expression = expression.replace('\n', ' ')
    tokens = expression.split('(')
    # assert all(t in ['', ' '] for t in tokens[0])
    assert tokens[1].strip() == 'lambda'
    arguments = tokens[2]
    assert len([i for i in arguments if i == ')']) == 1
    arguments = arguments.replace(')', '')
    arguments = arguments.split()
    assert len(arguments) == len(inputs)
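For illustration, the kind of program text this check accepts, called here on an assumed instance named validator (hypothetical, since the owning class is not shown):

# Passes: two formal arguments for two inputs, body kept in parentheses.
validator._validate_expression_abstract("(lambda (x y) (+ x y))", inputs=["a", "b"])

# Raises AssertionError: one formal argument but two inputs.
validator._validate_expression_abstract("(lambda (x) (f x))", inputs=["a", "b"])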