This article collects typical usage examples of the FileSet.Base method from the Go package go/token. If you have been wondering what exactly FileSet.Base does and how to use it, the hand-picked code examples below may help. You can also read more about its enclosing type, go/token.FileSet.
The following shows 5 code examples of FileSet.Base, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Go code examples.
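As a quick orientation before the examples: FileSet.Base returns the minimum base offset that must be passed to FileSet.AddFile when registering the next file, which keeps the position values of different files from overlapping. The sketch below shows this pattern in isolation; the file name and source text are made up for illustration and are not from the examples that follow.
package main

import (
	"fmt"
	"go/token"
)

func main() {
	fset := token.NewFileSet()
	src := []byte("package demo\n")

	// fset.Base() is the smallest base offset not yet in use;
	// passing it to AddFile keeps the new file's positions disjoint
	// from those of files added earlier.
	file := fset.AddFile("demo.go", fset.Base(), len(src))

	fmt.Println(fset.Base()) // base grows after the file is added
	fmt.Println(file.Name()) // "demo.go"
	fmt.Println(file.Size()) // 13
}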
Example 1: init
func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode uint) {
	p.file = fset.AddFile(filename, fset.Base(), len(src))
	p.scanner.Init(p.file, src, p, scannerMode(mode))
	p.mode = mode
	p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
	p.next()
}
Example 2: init
func (p *parser) init(fset *token.FileSet, filename string, src []byte) {
	p.ErrorVector.Reset()
	p.file = fset.AddFile(filename, fset.Base(), len(src))
	p.scanner.Init(p.file, src, p, scanner.AllowIllegalChars) // return '@' as token.ILLEGAL w/o error message
	p.next()                                                  // initializes pos, tok, lit
	p.packs = make(map[string]string)
	p.rules = make(map[string]expr)
}
Example 3: parse
func (p *ebnfParser) parse(fset *token.FileSet, out io.Writer, src []byte) {
	// initialize ebnfParser
	p.out = out
	p.src = src
	p.file = fset.AddFile("", fset.Base(), len(src))
	p.scanner.Init(p.file, src, p, scanner.AllowIllegalChars)
	p.next() // initializes pos, tok, lit
	// process source
	for p.tok != token.EOF {
		p.parseProduction()
	}
	p.flush()
}
Example 4: parse
func (p *parser) parse(fset *token.FileSet, filename string, src []byte) Grammar {
	// initialize parser
	p.fset = fset
	p.ErrorVector.Reset()
	p.scanner.Init(fset.AddFile(filename, fset.Base(), len(src)), src, p, scanner.AllowIllegalChars)
	p.next() // initializes pos, tok, lit
	grammar := make(Grammar)
	for p.tok != token.EOF {
		prod := p.parseProduction()
		name := prod.Name.String
		if _, found := grammar[name]; !found {
			grammar[name] = prod
		} else {
			p.error(prod.Pos(), name+" declared already")
		}
	}
	return grammar
}
Example 5: printDecl
func printDecl(decl ast.Node, fset *token.FileSet, buf []byte) (Code, []byte) {
	v := &annotationVisitor{}
	ast.Walk(v, decl)

	buf = buf[:0]
	err := (&printer.Config{Mode: printer.UseSpaces, Tabwidth: 4}).Fprint(sliceWriter{&buf}, fset, decl)
	if err != nil {
		return Code{Text: err.Error()}, buf
	}

	// Re-scan the printed output with a fresh FileSet so that token
	// positions map directly to byte offsets in buf.
	var annotations []Annotation
	var s scanner.Scanner
	fset = token.NewFileSet()
	file := fset.AddFile("", fset.Base(), len(buf))
	s.Init(file, buf, nil, scanner.ScanComments)
loop:
	for {
		pos, tok, lit := s.Scan()
		switch tok {
		case token.EOF:
			break loop
		case token.COMMENT:
			p := file.Offset(pos)
			e := p + len(lit)
			if p > math.MaxInt16 || e > math.MaxInt16 {
				break loop
			}
			annotations = append(annotations, Annotation{Kind: CommentAnnotation, Pos: int16(p), End: int16(e)})
		case token.IDENT:
			if len(v.annotations) == 0 {
				// Oops!
				break loop
			}
			annotation := v.annotations[0]
			v.annotations = v.annotations[1:]
			if annotation.Kind == -1 {
				continue
			}
			p := file.Offset(pos)
			e := p + len(lit)
			if p > math.MaxInt16 || e > math.MaxInt16 {
				break loop
			}
			annotation.Pos = int16(p)
			annotation.End = int16(e)
			if len(annotations) > 0 && annotation.Kind == ExportLinkAnnotation {
				prev := annotations[len(annotations)-1]
				if prev.Kind == PackageLinkAnnotation &&
					prev.ImportPath == annotation.ImportPath &&
					prev.End+1 == annotation.Pos {
					// merge with previous
					annotation.Pos = prev.Pos
					annotations[len(annotations)-1] = annotation
					continue loop
				}
			}
			annotations = append(annotations, annotation)
		}
	}
	return Code{Text: string(buf), Annotations: annotations}, buf
}