This page collects typical usage examples of the token.STRING attribute in Python. If you are unsure what token.STRING does or how to use it, the curated examples below may help. You can also explore other usage examples from the token module.
Five code examples using the token.STRING attribute are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
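Before the examples, a minimal sketch of what the attribute is: token.STRING is the integer token type that the tokenize module assigns to string literals. The source snippet below is made up for illustration.

import io
import token
import tokenize

# Tokenize a one-line snippet and pick out the string literal by its type.
for tok in tokenize.generate_tokens(io.StringIO('s = "hello"').readline):
    if tok.type == token.STRING:
        print(tok.string)  # prints: "hello"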
Example 1: evaluate
# Required import: import token [as alias]
# Or: from token import STRING [as alias]
def evaluate(cls, nodelist):
    while len(nodelist) == 2:
        nodelist = nodelist[1]
    kind = nodelist[0]
    name = nodelist[1]
    if kind == token.NAME:
        try:
            op = cls.values[name]
        except KeyError:
            raise SyntaxError("Unknown name %r" % name)
        return op()
    if kind == token.STRING:
        s = nodelist[1]
        if not cls._safe_string(s):
            raise SyntaxError(
                "Only plain strings allowed in environment markers")
        return s[1:-1]
    msg = "Language feature not supported in environment markers"
    raise SyntaxError(msg)
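For context, a hedged sketch of the input shape: in the parse trees this code was written against (from the old parser module, removed in Python 3.10), a terminal node is a (token_kind, text) tuple, so a string node still carries its quotes and evaluate() strips them with s[1:-1]. The node below is hand-built for illustration:

import token

# Hand-built terminal node of the shape evaluate() expects (illustrative):
string_node = (token.STRING, "'win32'")
kind, s = string_node
assert kind == token.STRING and s[1:-1] == 'win32'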
Example 2: find_strings
# Required import: import token [as alias]
# Or: from token import STRING [as alias]
# Also requires: import tokenize
def find_strings(filename):
    """Return a dict of possible docstring positions.

    The dict maps line numbers to strings. There is an entry for
    each line that contains only a string or a part of a triple-quoted
    string.
    """
    d = {}
    # If the first token is a string, then it's the module docstring.
    # Add this special case so that the test in the loop passes.
    prev_ttype = token.INDENT
    f = open(filename)
    for ttype, tstr, start, end, line in tokenize.generate_tokens(f.readline):
        if ttype == token.STRING:
            if prev_ttype == token.INDENT:
                sline, scol = start
                eline, ecol = end
                for i in range(sline, eline + 1):
                    d[i] = 1
        prev_ttype = ttype
    f.close()
    return d
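A possible usage sketch, assuming find_strings() above is in scope; the file contents are made up for illustration. The two-line module docstring yields entries for lines 1 and 2:

import tempfile
import textwrap

src = textwrap.dedent('''\
    """Module docstring
    spanning two lines."""
    x = 1
    ''')
with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as tmp:
    tmp.write(src)

print(find_strings(tmp.name))  # expected: {1: 1, 2: 1}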
Example 3: evaluate
# Required import: import token [as alias]
# Or: from token import STRING [as alias]
def evaluate(cls, nodelist):
    while len(nodelist) == 2:
        nodelist = nodelist[1]
    kind = nodelist[0]
    name = nodelist[1]
    if kind == token.NAME:
        try:
            op = cls.values[name]
        except KeyError:
            raise SyntaxError("Unknown name %r" % name)
        return op()
    if kind == token.STRING:
        s = nodelist[1]
        if s[:1] not in "'\"" or s.startswith('"""') or s.startswith("'''") \
                or '\\' in s:
            raise SyntaxError(
                "Only plain strings allowed in environment markers")
        return s[1:-1]
    raise SyntaxError("Language feature not supported in environment markers")
Example 4: _find_strings
# Required import: import token [as alias]
# Or: from token import STRING [as alias]
# Also requires: import tokenize
def _find_strings(filename, encoding=None):
    """Return a dict of possible docstring positions.

    The dict maps line numbers to strings. There is an entry for
    each line that contains only a string or a part of a triple-quoted
    string.
    """
    d = {}
    # If the first token is a string, then it's the module docstring.
    # Add this special case so that the test in the loop passes.
    prev_ttype = token.INDENT
    with open(filename, encoding=encoding) as f:
        tok = tokenize.generate_tokens(f.readline)
        for ttype, tstr, start, end, line in tok:
            if ttype == token.STRING:
                if prev_ttype == token.INDENT:
                    sline, scol = start
                    eline, ecol = end
                    for i in range(sline, eline + 1):
                        d[i] = 1
            prev_ttype = ttype
    return d
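A possible driver for this encoding-aware variant, assuming _find_strings() above is in scope; it detects the file's encoding with tokenize.detect_encoding() before scanning (the path is illustrative):

import tokenize

path = 'example.py'  # illustrative path
with open(path, 'rb') as f:
    encoding, _ = tokenize.detect_encoding(f.readline)
print(_find_strings(path, encoding=encoding))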
Example 5: process_tokens
# Required import: import token [as alias]
# Or: from token import STRING [as alias]
# Also requires: import tokenize
def process_tokens(self, tokens):
    # The module docstring can be a native string. module_start is also
    # used as a flag to notice when __future__ statements are no longer
    # valid, to avoid wasting time checking every NAME token
    # (which is < STRING).
    module_start = True
    line_num = 1
    for type_, val, start, end, line in tokens:
        if type_ in (token.NEWLINE, tokenize.NL):
            line_num += 1
        # Anything else means we are past the first string in the module,
        # past any comments (e.g. a shebang), and no more __future__
        # statements are possible.
        if type_ > token.NEWLINE and type_ < token.N_TOKENS:
            module_start = False
        elif type_ == token.STRING:
            line_num += val.count('\n')
            if not module_start and not val.startswith(('u', 'b')):
                self.add_message('native-string', line=line_num)
        elif module_start and type_ == token.NAME:
            if len(line) >= 39:  # Fast-fail check
                if u'__future__' in line and u'unicode_literals' in line:
                    return
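A sketch of how such a checker might receive its token stream. The checker instance itself is assumed, since process_tokens() is a method on a linter checker that reports findings via self.add_message():

import io
import tokenize

src = 's = "text"\n'  # a native (unprefixed) string literal
tokens = list(tokenize.generate_tokens(io.StringIO(src).readline))
# checker.process_tokens(tokens)  # 'checker' is an assumed checker instance;
#                                 # it would flag "text" as a native-string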