本文整理汇总了Python中token.NAME属性的典型用法代码示例。如果您正苦于以下问题:Python token.NAME属性的具体用法?Python token.NAME怎么用?Python token.NAME使用的例子?那么恭喜您, 这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在类token
的用法示例。
在下文中一共展示了token.NAME属性的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: comparison
# 需要导入模块: import token [as 别名]
# 或者: from token import NAME [as 别名]
def comparison(cls, nodelist):
    """Evaluate a comparison parse node and return the comparison result.

    Chained comparisons (``a < b < c``) are rejected, as are operators
    that ``cls.get_op`` does not recognize.
    """
    # More than four entries encodes a chained comparison, which
    # environment markers do not allow.
    if len(nodelist) > 4:
        raise SyntaxError("Chained comparison not allowed in environment markers")
    comp = nodelist[2][1]
    cop = comp[1]
    if comp[0] == token.NAME:
        # Two-word operators arrive split across tokens; reassemble them.
        if len(nodelist[2]) == 3:
            cop = 'not in' if cop == 'not' else 'is not'
    try:
        op_func = cls.get_op(cop)
    except KeyError:
        raise SyntaxError(repr(cop) + " operator not allowed in environment markers")
    return op_func(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))
示例2: evaluate
# 需要导入模块: import token [as 别名]
# 或者: from token import NAME [as 别名]
def evaluate(cls, nodelist):
    """Reduce a marker parse sub-tree to its runtime value.

    NAME nodes are looked up in ``cls.values`` and called; STRING nodes
    must pass ``cls._safe_string`` and are returned without their quotes.
    Anything else is rejected.
    """
    # Descend through single-child wrapper nodes to the terminal.
    while len(nodelist) == 2:
        nodelist = nodelist[1]
    kind = nodelist[0]
    name = nodelist[1]
    if kind == token.NAME:
        try:
            op = cls.values[name]
        except KeyError:
            raise SyntaxError("Unknown name %r" % name)
        return op()
    if kind == token.STRING:
        s = nodelist[1]
        if not cls._safe_string(s):
            raise SyntaxError("Only plain strings allowed in environment markers")
        # Strip the surrounding quote characters.
        return s[1:-1]
    raise SyntaxError("Language feature not supported in environment markers")
示例3: _getname
# 需要导入模块: import token [as 别名]
# 或者: from token import NAME [as 别名]
def _getname(g):
# Helper to get a dotted name, return a pair (name, token) where
# name is the dotted name, or None if there was no dotted name,
# and token is the next input token.
parts = []
tokentype, token = g.next()[0:2]
if tokentype != NAME and token != '*':
return (None, token)
parts.append(token)
while True:
tokentype, token = g.next()[0:2]
if token != '.':
break
tokentype, token = g.next()[0:2]
if tokentype != NAME:
break
parts.append(token)
return (".".join(parts), token)
示例4: classdef
# 需要导入模块: import token [as 别名]
# 或者: from token import NAME [as 别名]
def classdef(self, nodelist):
    """Transform a classdef parse node into a Class AST node.

    Grammar: classdef: 'class' NAME ['(' [testlist] ')'] ':' suite
    """
    name = nodelist[1][1]
    doc = self.get_docstring(nodelist[-1])
    # No parenthesised base list, or an empty '()', means no bases.
    # (Short-circuit keeps nodelist[3] unread when the ':' follows
    # the name directly, matching the grammar.)
    if nodelist[2][0] == token.COLON or nodelist[3][0] == token.RPAR:
        bases = []
    else:
        bases = self.com_bases(nodelist[3])
    # Generate code for the class body.
    code = self.com_node(nodelist[-1])
    if doc is not None:
        # The docstring is the suite's first statement; drop it from the
        # generated code since it is carried separately on the Class node.
        assert isinstance(code, Stmt)
        assert isinstance(code.nodes[0], Discard)
        del code.nodes[0]
    return Class(name, bases, doc, code, lineno=nodelist[1][2])
示例5: com_argument
# 需要导入模块: import token [as 别名]
# 或者: from token import NAME [as 别名]
def com_argument(self, nodelist, kw, star_node):
    """Transform one call-argument parse node.

    Returns a pair ``(is_keyword, node)`` where *is_keyword* is 1 for a
    keyword argument and 0 for a positional one.

    NOTE: Python 2 code — uses the old ``raise Exc, msg`` statement form.
    """
    # Generator expression used as the sole argument: f(x for x in y).
    if len(nodelist) == 3 and nodelist[2][0] == symbol.comp_for:
        test = self.com_node(nodelist[1])
        return 0, self.com_generator_expression(test, nodelist[2])
    # Plain positional argument (no '=' present).
    if len(nodelist) == 2:
        if kw:
            raise SyntaxError, "non-keyword arg after keyword arg"
        if star_node:
            raise SyntaxError, "only named arguments may follow *expression"
        return 0, self.com_node(nodelist[1])
    # Keyword argument: descend through wrapper nodes to the NAME on the
    # left of the '='.
    result = self.com_node(nodelist[3])
    n = nodelist[1]
    while len(n) == 2 and n[0] != token.NAME:
        n = n[1]
    if n[0] != token.NAME:
        raise SyntaxError, "keyword can't be an expression (%s)"%n[0]
    node = Keyword(n[1], result, lineno=n[2])
    return 1, node
示例6: comparison
# 需要导入模块: import token [as 别名]
# 或者: from token import NAME [as 别名]
def comparison(cls, nodelist):
    """Apply the comparison operator found in *nodelist* to its two
    evaluated operands and return the result.
    """
    if len(nodelist) > 4:
        # Four entries is a single comparison; more means chaining.
        raise SyntaxError("Chained comparison not allowed in environment markers")
    comp = nodelist[2][1]
    cop = comp[1]
    # 'not in' / 'is not' arrive as two tokens; rebuild the full operator.
    if comp[0] == token.NAME and len(nodelist[2]) == 3:
        cop = 'not in' if cop == 'not' else 'is not'
    try:
        operator_fn = cls.get_op(cop)
    except KeyError:
        raise SyntaxError(repr(cop)+" operator not allowed in environment markers")
    return operator_fn(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))
示例7: evaluate
# 需要导入模块: import token [as 别名]
# 或者: from token import NAME [as 别名]
def evaluate(cls, nodelist):
    """Reduce a marker parse tree node to its runtime string value."""
    # Unwrap single-child wrapper nodes until a terminal is reached.
    while len(nodelist) == 2:
        nodelist = nodelist[1]
    kind = nodelist[0]
    name = nodelist[1]
    if kind == token.NAME:
        try:
            op = cls.values[name]
        except KeyError:
            raise SyntaxError("Unknown name %r" % name)
        return op()
    if kind == token.STRING:
        s = nodelist[1]
        # Only simple quoted strings are allowed: no triple quotes, no
        # escape sequences, and the literal must start with a quote.
        plain = (s[:1] in "'\""
                 and not s.startswith('"""')
                 and not s.startswith("'''")
                 and '\\' not in s)
        if not plain:
            raise SyntaxError("Only plain strings allowed in environment markers")
        return s[1:-1]
    raise SyntaxError("Language feature not supported in environment markers")
示例8: Annotate
# 需要导入模块: import token [as 别名]
# 或者: from token import NAME [as 别名]
def Annotate(cls, nodes):
    """Group a leading dotted-name span (atom '.' NAME '.' NAME ...) of
    *nodes* into a single ``cls(...)`` node.

    Returns ``None`` when *nodes* does not begin with a NAME atom,
    otherwise ``[cls(matched_nodes)] + remaining_nodes``.

    NOTE: Python 2 code — uses ``xrange``.
    """
    if not nodes:
        return None
    if nodes[0].type != symbol.atom:
        return None
    # The atom must start with a NAME token (e.g. "foo", not "(" or "[").
    if not nodes[0].children or nodes[0].children[0].type != token.NAME:
        return None
    # Extend the match over consecutive ".NAME" trailer nodes.
    for i in xrange(1, len(nodes)):
        if not nodes:
            # NOTE(review): 'nodes' cannot be empty here (guarded above);
            # possibly 'nodes[i]' was intended — confirm against callers.
            break
        if nodes[i].type != symbol.trailer:
            break
        if len(nodes[i].children) != 2:
            break
        if (nodes[i].children[0].type != token.DOT or
            nodes[i].children[1].type != token.NAME):
            break
    else:
        # Every node matched: consume the whole list.
        i = len(nodes)
    return [cls(nodes[:i])] + nodes[i:]
示例9: value
# 需要导入模块: import token [as 别名]
# 或者: from token import NAME [as 别名]
def value(self, value):
    """Setter: update this dotted-name node to spell *value*.

    Existing child nodes are reused where possible so that surrounding
    comments and whitespace are preserved; extra ".part" trailers are
    appended for any new components.

    NOTE: Python 2 code — uses ``itertools.izip_longest``.
    """
    value_parts = value.split('.')
    # If we have too many children, cut the list down to size.
    # pylint: disable=attribute-defined-outside-init
    self._children = self._children[:len(value_parts)]
    # Update child nodes.
    for child, value_part in itertools.izip_longest(
        self._children, value_parts):
        if child:
            # Modify existing children. This helps preserve comments and spaces.
            child.children[-1].value = value_part
        else:
            # Add children as needed.
            token_snippets = [
                snippet.TokenSnippet.Create(token.DOT, '.'),
                snippet.TokenSnippet.Create(token.NAME, value_part),
            ]
            self._children.append(snippet.Symbol(symbol.trailer, token_snippets))
示例10: value
# 需要导入模块: import token [as 别名]
# 或者: from token import NAME [as 别名]
def value(self, value):
    """Setter: update this dotted name, stored as alternating
    NAME / DOT child tokens (hence the ``[::2]`` stride below).

    Raises:
        ValueError: if any dotted component is a reserved keyword.

    NOTE: Python 2 code — uses ``itertools.izip_longest``.
    """
    value_parts = value.split('.')
    for value_part in value_parts:
        if keyword.iskeyword(value_part):
            raise ValueError('%s is a reserved keyword.' % value_part)
    # If we have too many children, cut the list down to size.
    # N parts need N name tokens plus N-1 dots: 2*N - 1 children.
    # pylint: disable=attribute-defined-outside-init
    self._children = self._children[:len(value_parts)*2-1]
    # Update child nodes.
    for child, value_part in itertools.izip_longest(
        self._children[::2], value_parts):
        if child:
            # Modify existing children. This helps preserve comments and spaces.
            child.value = value_part
        else:
            # Add children as needed.
            self._children.append(snippet.TokenSnippet.Create(token.DOT, '.'))
            self._children.append(
                snippet.TokenSnippet.Create(token.NAME, value_part))
示例11: alias
# 需要导入模块: import token [as 别名]
# 或者: from token import NAME [as 别名]
def alias(self, value):
    """Setter: change or remove this import's "as <alias>" clause.

    A falsy *value* strips the alias entirely; otherwise the alias is
    appended (when absent) or rewritten in place.

    Raises:
        ValueError: if *value* is a reserved keyword.
    """
    if keyword.iskeyword(value):
        raise ValueError('%s is a reserved keyword.' % value)
    if not value:
        # Removing the alias. Strip the "as foo".
        self.children = [self.children[0]]  # pylint: disable=line-too-long, attribute-defined-outside-init
        return
    # pylint: disable=access-member-before-definition
    if len(self.children) < 3:
        # No alias yet: append the "as <value>" token pair.
        self.children.append(
            snippet.TokenSnippet.Create(token.NAME, 'as', (0, 1)))
        self.children.append(
            snippet.TokenSnippet.Create(token.NAME, value, (0, 1)))
    else:
        # An alias already exists: just rewrite its value token.
        self.children[2].value = value
示例12: name_
# 需要导入模块: import token [as 别名]
# 或者: from token import NAME [as 别名]
def name_(string):
    """Return a parser that matches (and discards) a NAME token whose
    text equals *string*."""
    def _is_match(tok):
        return tok.type == token.NAME and tok.string == string
    return skip(some(_is_match))
示例13: check_for_wrong_tuple
# 需要导入模块: import token [as 别名]
# 或者: from token import NAME [as 别名]
def check_for_wrong_tuple(tree, code, noqa):
    """Find assignments/returns that accidentally create a one-element tuple.

    *tree* is the module AST, *code* the corresponding token lines, and
    *noqa* a collection of line numbers to skip.  Returns a list of
    ``(row, col)`` token start positions where a suspicious trailing
    comma was found.
    """
    errors = []
    candidates = []
    # Pass 1 (AST): collect assignments/returns containing a 1-tuple.
    for assign in ast.walk(tree):
        if not isinstance(assign, (ast.Assign, ast.Return)):
            continue
        elif assign.lineno in noqa:
            continue
        elif isinstance(assign.value, ast.Call):
            # Calls returning tuples are legitimate; skip them.
            continue
        for tuple_el in ast.walk(assign):
            if isinstance(tuple_el, ast.Tuple) and len(tuple_el.elts) == 1:
                candidates.append((assign.lineno, assign.col_offset))
                break
    if not candidates:
        return []
    # Pass 2 (tokens): re-tokenize per candidate to pin down the comma.
    for candidate in candidates:
        number_nl = 0  # account for logical newlines within statements
        tokens = tokenize.generate_tokens(
            lambda L=iter(code): next(L)
        )
        previous_token = None
        for t in tokens:
            # A logical NEWLINE ends the statement; reset the NL counter.
            if previous_token is not None and previous_token.type == tokenize.NEWLINE:
                number_nl = 0
            x = TokenInfo(*t)
            # Map physical rows back to the candidate's logical line.
            if x.start[0] - number_nl != candidate[0]:
                previous_token = x
                continue
            if x.type == tokenize.NL:
                number_nl += 1
            # Statement ends right after a trailing comma -> bad tuple.
            if x.type == token.NEWLINE and ending_of_bad_tuple(previous_token):
                errors.append(x.start)
            # '=' not preceded by a NAME: inspect the right-hand side for
            # an unparenthesised single-element tuple.
            if x.type == token.OP and x.string == '=' and previous_token.type != token.NAME:
                x = TokenInfo(*next(tokens))
                if x.type != token.OP and x.string != '(':
                    x_next = TokenInfo(*next(tokens))
                    if ending_of_bad_tuple(x_next):
                        errors.append(x.start)
            previous_token = x
    return errors