This article compiles typical usage examples of the Line field of Golang's go/token.Position type (often listed as a "method", but Line is in fact a struct field of token.Position). If you have been wondering what Position.Line is for and how to use it, the curated code examples below may help. You can also read further about the enclosing type, go/token.Position.
The following shows 5 code examples involving Position.Line, sorted by popularity by default.
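Before the examples, here is a minimal, self-contained sketch (my own, not taken from the examples below) showing where Position.Line comes from: the scanner reports compact token.Pos values, and a token.FileSet resolves each one into a token.Position record whose Line field holds the 1-based line number.

package main

import (
	"fmt"
	"go/scanner"
	"go/token"
)

func main() {
	src := []byte("package main\n\nvar answer = 42 // the answer\n")

	fset := token.NewFileSet()
	file := fset.AddFile("example.go", fset.Base(), len(src))

	var s scanner.Scanner
	s.Init(file, src, nil /* no error handler */, scanner.ScanComments)

	for {
		pos, tok, lit := s.Scan()
		if tok == token.EOF {
			break
		}
		p := fset.Position(pos) // resolve token.Pos into a token.Position
		fmt.Printf("line %d, col %d: %s %q\n", p.Line, p.Column, tok, lit)
	}
}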
Example 1: TestScan
// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
	// make source
	src_linecount := newlineCount(string(source))
	whitespace_linecount := newlineCount(whitespace)

	// verify scan
	var s Scanner
	s.Init(fset.AddFile("", fset.Base(), len(source)), source, &testErrorHandler{t}, ScanComments|dontInsertSemis)
	index := 0
	epos := token.Position{"", 0, 1, 1} // expected position
	for {
		pos, tok, lit := s.Scan()
		if lit == "" {
			// no literal value for non-literal tokens
			lit = tok.String()
		}
		e := elt{token.EOF, "", special}
		if index < len(tokens) {
			e = tokens[index]
		}
		if tok == token.EOF {
			lit = "<EOF>"
			epos.Line = src_linecount
			epos.Column = 2
		}
		checkPos(t, lit, pos, epos)
		if tok != e.tok {
			t.Errorf("bad token for %q: got %s, expected %s", lit, tok, e.tok)
		}
		if e.tok.IsLiteral() {
			// no CRs in raw string literals
			elit := e.lit
			if elit[0] == '`' {
				elit = string(stripCR([]byte(elit)))
				epos.Offset += len(e.lit) - len(lit) // correct position
			}
			if lit != elit {
				t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, elit)
			}
		}
		if tokenclass(tok) != e.class {
			t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
		}
		epos.Offset += len(lit) + len(whitespace)
		epos.Line += newlineCount(lit) + whitespace_linecount
		if tok == token.COMMENT && lit[1] == '/' {
			// correct for the unaccounted '\n' in //-style comments
			epos.Offset++
			epos.Line++
		}
		index++
		if tok == token.EOF {
			break
		}
	}
	if s.ErrorCount != 0 {
		t.Errorf("found %d errors", s.ErrorCount)
	}
}
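Note that this test calls several unexported helpers (newlineCount, stripCR, checkPos, tokenclass) and fixtures (source, whitespace, tokens, fset) that the excerpt does not include. As a rough sketch, the two counting helpers plausibly look like the following, reconstructed purely from how they are called above; the exact definitions are assumptions on my part.

// newlineCount returns the number of '\n' bytes in s.
func newlineCount(s string) int {
	n := 0
	for i := 0; i < len(s); i++ {
		if s[i] == '\n' {
			n++
		}
	}
	return n
}

// stripCR returns a copy of b with all '\r' bytes removed, matching
// how the scanner normalizes raw string literals and comments.
func stripCR(b []byte) []byte {
	c := make([]byte, len(b))
	i := 0
	for _, ch := range b {
		if ch != '\r' {
			c[i] = ch
			i++
		}
	}
	return c[:i]
}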
Example 2: TestScan
// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
	// make source
	var src string
	for _, e := range tokens {
		src += e.lit + whitespace
	}
	src_linecount := newlineCount(src) + 1
	whitespace_linecount := newlineCount(whitespace)

	// verify scan
	var s Scanner
	s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), &testErrorHandler{t}, ScanComments)
	index := 0
	epos := token.Position{"", 0, 1, 1} // expected position
	for {
		pos, tok, litb := s.Scan()
		e := elt{token.EOF, "", special}
		if index < len(tokens) {
			e = tokens[index]
		}
		lit := string(litb)
		if tok == token.EOF {
			lit = "<EOF>"
			epos.Line = src_linecount
			epos.Column = 1
		}
		checkPos(t, lit, pos, epos)
		if tok != e.tok {
			t.Errorf("bad token for %q: got %s, expected %s", lit, tok.String(), e.tok.String())
		}
		if e.tok.IsLiteral() && lit != e.lit {
			t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, e.lit)
		}
		if tokenclass(tok) != e.class {
			t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
		}
		epos.Offset += len(lit) + len(whitespace)
		epos.Line += newlineCount(lit) + whitespace_linecount
		if tok == token.COMMENT && litb[1] == '/' {
			// correct for the unaccounted '\n' in //-style comments
			epos.Offset++
			epos.Line++
		}
		index++
		if tok == token.EOF {
			break
		}
	}
	if s.ErrorCount != 0 {
		t.Errorf("found %d errors", s.ErrorCount)
	}
}
Example 3: TestScan
// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
	// make source
	var src string
	for _, e := range tokens {
		src += e.lit + whitespace
	}
	src_linecount := newlineCount(src)
	whitespace_linecount := newlineCount(whitespace)

	// verify scan
	index := 0
	epos := token.Position{"", 0, 1, 1} // expected position
	nerrors := Tokenize("", []byte(src), &testErrorHandler{t}, ScanComments,
		func(pos token.Position, tok token.Token, litb []byte) bool {
			e := elt{token.EOF, "", special}
			if index < len(tokens) {
				e = tokens[index]
			}
			lit := string(litb)
			if tok == token.EOF {
				lit = "<EOF>"
				epos.Line = src_linecount
				epos.Column = 1
			}
			checkPos(t, lit, pos, epos)
			if tok != e.tok {
				t.Errorf("bad token for %q: got %s, expected %s", lit, tok.String(), e.tok.String())
			}
			if e.tok.IsLiteral() && lit != e.lit {
				t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, e.lit)
			}
			if tokenclass(tok) != e.class {
				t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
			}
			epos.Offset += len(lit) + len(whitespace)
			epos.Line += newlineCount(lit) + whitespace_linecount
			if tok == token.COMMENT && litb[1] == '/' {
				// correct for the unaccounted '\n' in //-style comments
				epos.Offset++
				epos.Line++
			}
			index++
			return tok != token.EOF
		})
	if nerrors != 0 {
		t.Errorf("found %d errors", nerrors)
	}
}
Example 4: NewCommentMap
// NewCommentMap creates a new comment map by associating comment groups
// of the comments list with the nodes of the AST specified by node.
//
// A comment group g is associated with a node n if:
//
//   - g starts on the same line as n ends
//   - g starts on the line immediately following n, and there is
//     at least one empty line after g and before the next node
//   - g starts before n and is not associated to the node before n
//     via the previous rules
//
// NewCommentMap tries to associate a comment group to the "largest"
// node possible: For instance, if the comment is a line comment
// trailing an assignment, the comment is associated with the entire
// assignment rather than just the last operand in the assignment.
//
func NewCommentMap(fset *token.FileSet, node Node, comments []*CommentGroup) CommentMap {
	if len(comments) == 0 {
		return nil // no comments to map
	}

	cmap := make(CommentMap)

	// set up comment reader r
	tmp := make([]*CommentGroup, len(comments))
	copy(tmp, comments) // don't change incoming comments
	sortComments(tmp)
	r := commentListReader{fset: fset, list: tmp} // !r.eol() because len(comments) > 0
	r.next()

	// create node list in lexical order
	nodes := nodeList(node)
	nodes = append(nodes, nil) // append sentinel

	// set up iteration variables
	var (
		p     Node           // previous node
		pend  token.Position // end of p
		pg    Node           // previous node group (enclosing nodes of "importance")
		pgend token.Position // end of pg
		stack nodeStack      // stack of node groups
	)

	for _, q := range nodes {
		var qpos token.Position
		if q != nil {
			qpos = fset.Position(q.Pos()) // current node position
		} else {
			// set fake sentinel position to infinity so that
			// all comments get processed before the sentinel
			const infinity = 1 << 30
			qpos.Offset = infinity
			qpos.Line = infinity
		}

		// process comments before current node
		for r.end.Offset <= qpos.Offset {
			// determine recent node group
			if top := stack.pop(r.comment.Pos()); top != nil {
				pg = top
				pgend = fset.Position(pg.End())
			}
			// Try to associate a comment first with a node group
			// (i.e., a node of "importance" such as a declaration);
			// if that fails, try to associate it with the most recent
			// node.
			// TODO(gri) try to simplify the logic below
			var assoc Node
			switch {
			case pg != nil &&
				(pgend.Line == r.pos.Line ||
					pgend.Line+1 == r.pos.Line && r.end.Line+1 < qpos.Line):
				// 1) comment starts on same line as previous node group ends, or
				// 2) comment starts on the line immediately after the
				//    previous node group and there is an empty line before
				//    the current node
				// => associate comment with previous node group
				assoc = pg
			case p != nil &&
				(pend.Line == r.pos.Line ||
					pend.Line+1 == r.pos.Line && r.end.Line+1 < qpos.Line ||
					q == nil):
				// same rules apply as above for p rather than pg,
				// but also associate with p if we are at the end (q == nil)
				assoc = p
			default:
				// otherwise, associate comment with current node
				if q == nil {
					// we can only reach here if there was no p
					// which would imply that there were no nodes
					panic("internal error: no comments should be associated with sentinel")
				}
				assoc = q
			}
			cmap.addComment(assoc, r.comment)
			if r.eol() {
				return cmap
			}
			r.next()
		}
//......... (remainder of this example omitted) .........
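For completeness, here is a small, self-contained usage sketch for the finished ast.NewCommentMap (my own example, not part of the excerpt above): it parses a source fragment with parser.ParseComments and uses Position.Line to report the starting line of each node a comment group was associated with.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

func main() {
	src := `package p

// add returns the sum of a and b.
func add(a, b int) int {
	return a + b // trailing comment
}
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}

	cmap := ast.NewCommentMap(fset, f, f.Comments)
	for node, groups := range cmap {
		line := fset.Position(node.Pos()).Line // line where the associated node starts
		for _, g := range groups {
			fmt.Printf("node at line %d: %q\n", line, g.Text())
		}
	}
}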
Example 5: TestScan
// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
	whitespace_linecount := newlineCount(whitespace)

	// error handler
	eh := func(_ token.Position, msg string) {
		t.Errorf("error handler called (msg = %s)", msg)
	}

	// verify scan
	var s Scanner
	s.Init(fset.AddFile("", fset.Base(), len(source)), source, eh, ScanComments|dontInsertSemis)

	// set up expected position
	epos := token.Position{
		Filename: "",
		Offset:   0,
		Line:     1,
		Column:   1,
	}

	index := 0
	for {
		pos, tok, lit := s.Scan()

		// check position
		if tok == token.EOF {
			// correction for EOF
			epos.Line = newlineCount(string(source))
			epos.Column = 2
		}
		checkPos(t, lit, pos, epos)

		// check token
		e := elt{token.EOF, "", special}
		if index < len(tokens) {
			e = tokens[index]
			index++
		}
		if tok != e.tok {
			t.Errorf("bad token for %q: got %s, expected %s", lit, tok, e.tok)
		}

		// check token class
		if tokenclass(tok) != e.class {
			t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
		}

		// check literal
		elit := ""
		switch e.tok {
		case token.COMMENT:
			// no CRs in comments
			elit = string(stripCR([]byte(e.lit)))
			// a //-style comment literal doesn't contain the trailing newline
			if elit[1] == '/' {
				elit = elit[0 : len(elit)-1]
			}
		case token.IDENT:
			elit = e.lit
		case token.SEMICOLON:
			elit = ";"
		default:
			if e.tok.IsLiteral() {
				// no CRs in raw string literals
				elit = e.lit
				if elit[0] == '`' {
					elit = string(stripCR([]byte(elit)))
				}
			} else if e.tok.IsKeyword() {
				elit = e.lit
			}
		}
		if lit != elit {
			t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, elit)
		}

		if tok == token.EOF {
			break
		}

		// update position
		epos.Offset += len(e.lit) + len(whitespace)
		epos.Line += newlineCount(e.lit) + whitespace_linecount
	}

	if s.ErrorCount != 0 {
		t.Errorf("found %d errors", s.ErrorCount)
	}
}
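As a closing note, two small properties of token.Position that the position checks above rely on: IsValid reports whether the position is valid, which is defined as Line > 0, and the String method renders the familiar file:line:column form. A brief standalone sketch (my own, for illustration):

package main

import (
	"fmt"
	"go/token"
)

func main() {
	p := token.Position{Filename: "main.go", Offset: 9, Line: 2, Column: 5}
	fmt.Println(p.IsValid()) // true: a Position is valid iff Line > 0
	fmt.Println(p)           // prints "main.go:2:5"

	var zero token.Position    // zero value: Line == 0
	fmt.Println(zero.IsValid()) // false
	fmt.Println(zero)           // prints "-"
}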