This article collects typical usage examples of the token.STRING attribute in Python. If you have been wondering exactly what token.STRING does and how to use it, the curated code samples below should help; you can also explore further usage examples from the token module itself.
Five code examples of the token.STRING attribute are shown below, ordered by popularity by default.
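Before the examples, here is a minimal self-contained sketch (our own, not taken from any of the projects below) of what token.STRING is: an integer token type that tokenize assigns to string literals in Python source.

import io
import token
import tokenize

source = 'greeting = "hello"\n'
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type == token.STRING:
        # tok.string keeps the quotes; token.tok_name maps the numeric
        # token type back to its symbolic name.
        print(token.tok_name[tok.type], tok.string)  # STRING "hello"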
Example 1: evaluate
# Required import: import token [as alias]
# Or: from token import STRING [as alias]
def evaluate(cls, nodelist):
    while len(nodelist)==2: nodelist = nodelist[1]
    kind = nodelist[0]
    name = nodelist[1]
    if kind==token.NAME:
        try:
            op = cls.values[name]
        except KeyError:
            raise SyntaxError("Unknown name %r" % name)
        return op()
    if kind==token.STRING:
        s = nodelist[1]
        if not cls._safe_string(s):
            raise SyntaxError(
                "Only plain strings allowed in environment markers")
        return s[1:-1]
    msg = "Language feature not supported in environment markers"
    raise SyntaxError(msg)
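This evaluate appears to walk parse-tree node lists in the old parser-module convention, where a terminal node is a (token_type, token_text) pair. To see just the token.STRING branch in isolation, here is a hedged, self-contained sketch; eval_terminal is an illustrative name, and the inlined safety rule is an assumed equivalent of cls._safe_string, not the real API.

import token

def eval_terminal(node):
    kind, text = node[0], node[1]
    if kind == token.STRING:
        # Assumed equivalent of cls._safe_string: single- or double-quoted,
        # not triple-quoted, and free of backslash escapes.
        if text[:1] not in "'\"" or text.startswith(('"""', "'''")) or '\\' in text:
            raise SyntaxError("Only plain strings allowed in environment markers")
        return text[1:-1]  # strip the surrounding quotes
    raise SyntaxError("Language feature not supported in environment markers")

print(eval_terminal((token.STRING, "'linux'")))  # -> linux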
Example 2: find_strings
# Required import: import token [as alias]
# Or: from token import STRING [as alias]
def find_strings(filename):
    """Return a dict of possible docstring positions.

    The dict maps line numbers to strings.  There is an entry for
    each line that contains only a string or a part of a
    triple-quoted string.
    """
    d = {}
    # If the first token is a string, then it's the module docstring.
    # Add this special case so that the test in the loop passes.
    prev_ttype = token.INDENT
    f = open(filename)
    for ttype, tstr, start, end, line in tokenize.generate_tokens(f.readline):
        if ttype == token.STRING:
            if prev_ttype == token.INDENT:
                sline, scol = start
                eline, ecol = end
                for i in range(sline, eline + 1):
                    d[i] = 1
        prev_ttype = ttype
    f.close()
    return d
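A hypothetical usage sketch for find_strings, assuming the imports noted above (token plus the tokenize module it relies on) are present and using a file name of our choosing:

import token
import tokenize

# Keys of the returned dict are the line numbers occupied by candidate
# docstrings; the values are simply the flag 1.
positions = find_strings('some_module.py')  # hypothetical target file
print(sorted(positions))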
Example 3: evaluate
# Required import: import token [as alias]
# Or: from token import STRING [as alias]
def evaluate(cls, nodelist):
    while len(nodelist)==2: nodelist = nodelist[1]
    kind = nodelist[0]
    name = nodelist[1]
    if kind==token.NAME:
        try:
            op = cls.values[name]
        except KeyError:
            raise SyntaxError("Unknown name %r" % name)
        return op()
    if kind==token.STRING:
        s = nodelist[1]
        if s[:1] not in "'\"" or s.startswith('"""') or s.startswith("'''") \
                or '\\' in s:
            raise SyntaxError(
                "Only plain strings allowed in environment markers")
        return s[1:-1]
    raise SyntaxError("Language feature not supported in environment markers")
Example 4: _find_strings
# Required import: import token [as alias]
# Or: from token import STRING [as alias]
def _find_strings(filename, encoding=None):
    """Return a dict of possible docstring positions.

    The dict maps line numbers to strings.  There is an entry for
    each line that contains only a string or a part of a
    triple-quoted string.
    """
    d = {}
    # If the first token is a string, then it's the module docstring.
    # Add this special case so that the test in the loop passes.
    prev_ttype = token.INDENT
    with open(filename, encoding=encoding) as f:
        tok = tokenize.generate_tokens(f.readline)
        for ttype, tstr, start, end, line in tok:
            if ttype == token.STRING:
                if prev_ttype == token.INDENT:
                    sline, scol = start
                    eline, ecol = end
                    for i in range(sline, eline + 1):
                        d[i] = 1
            prev_ttype = ttype
    return d
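A hypothetical driver for _find_strings, pairing it with tokenize.detect_encoding, which is the standard-library way to obtain an encoding for the function's encoding parameter:

import tokenize

filename = 'some_module.py'  # hypothetical target file
with open(filename, 'rb') as fp:
    encoding, _ = tokenize.detect_encoding(fp.readline)
print(sorted(_find_strings(filename, encoding)))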
Example 5: process_tokens
# Required import: import token [as alias]
# Or: from token import STRING [as alias]
def process_tokens(self, tokens):
    # Module docstring can be a native string.
    # Also use as a flag to notice when __future__ statements are no longer
    # valid to avoid wasting time checking every NAME token
    # (which is < STRING).
    module_start = True
    line_num = 1
    for type_, val, start, end, line in tokens:
        if type_ in (token.NEWLINE, tokenize.NL):
            line_num += 1
        # Anything else means we are past the first string in the module,
        # any comments (e.g. shebang), and no more __future__ statements
        # are possible.
        if type_ > token.NEWLINE and type_ < token.N_TOKENS:
            module_start = False
        elif type_ == token.STRING:
            line_num += val.count('\n')
            if not module_start and not val.startswith(('u', 'b')):
                self.add_message('native-string', line=line_num)
        elif module_start and type_ == token.NAME:
            if len(line) >= 39:  # Fast-fail check
                if u'__future__' in line and u'unicode_literals' in line:
                    return
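A minimal harness for the method above, under the assumption that it belongs to a pylint-style checker class; NativeStringChecker is a hypothetical name, and its add_message simply prints instead of recording a lint message ('native-string' is the message id used in the example itself):

import io
import token
import tokenize

class NativeStringChecker:
    def add_message(self, msg_id, line=None):
        print('%s at line %s' % (msg_id, line))

NativeStringChecker.process_tokens = process_tokens  # attach the method above

checker = NativeStringChecker()
src = 'x = "plain"\n'  # no u/b prefix, no __future__ import
checker.process_tokens(tokenize.generate_tokens(io.StringIO(src).readline))
# -> native-string at line 1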