This page collects typical usage examples of the Golang FileSet.File method from github.com/tcard/sgo/sgo/token. If you are unsure what FileSet.File does, how to call it, or where it is normally used, the curated code examples below may help. You can also read more about its containing type, github.com/tcard/sgo/sgo/token.FileSet.
Seven code examples of FileSet.File are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
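Before the examples: FileSet.File(p) returns the *token.File that contains position p, giving access to per-file data such as the file name, base offset, size, and line table. As an assumption for the sketches on this page, sgo's token package mirrors the standard library's go/token API, so a minimal, self-contained illustration (not taken from the examples below) looks like this:

package main

import (
	"fmt"
	"go/parser"
	"go/token" // standard-library counterpart of sgo/token
)

func main() {
	const src = "package main\n\nfunc main() {}\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "example.go", src, 0)
	if err != nil {
		panic(err)
	}
	// FileSet.File returns the *token.File containing the given position.
	tf := fset.File(f.Package)
	fmt.Println(tf.Name(), tf.Base(), tf.Size()) // file name, base offset, size
	fmt.Println(fset.Position(f.Package))        // example.go:1:1
}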
Example 1: convertAST
func convertAST(info *types.Info, src []byte, sgoAST *ast.File, fset *token.FileSet) []byte {
	c := converter{
		Info: info,
		src:  src,
		base: fset.File(sgoAST.Pos()).Base() - 1,
		fset: fset,
	}
	c.convertFile(sgoAST)
	return bytes.Join(append(c.dstChunks, src[c.lastChunkEnd:]), nil)
}
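Here `fset.File(sgoAST.Pos()).Base()` is what ties FileSet-wide positions to indexes into src: a token.Pos is an offset relative to the FileSet, while `src` is indexed from 0. A small sketch of that relationship (the helper name is hypothetical; the exact -1 adjustment in convertAST depends on how the converter indexes its chunks):

// srcOffset maps a token.Pos back to a 0-based byte offset within the file
// that contains it. Hypothetical helper, shown for illustration only.
func srcOffset(fset *token.FileSet, pos token.Pos) int {
	f := fset.File(pos)        // *token.File containing pos
	return int(pos) - f.Base() // equivalent to f.Offset(pos)
}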
Example 2: findInterval
// findInterval parses input and returns the [start, end) positions of
// the first occurrence of substr in input. f==nil indicates failure;
// an error has already been reported in that case.
//
func findInterval(t *testing.T, fset *token.FileSet, input, substr string) (f *ast.File, start, end token.Pos) {
	f, err := parser.ParseFile(fset, "<input>", input, 0)
	if err != nil {
		t.Errorf("parse error: %s", err)
		return
	}
	i := strings.Index(input, substr)
	if i < 0 {
		t.Errorf("%q is not a substring of input", substr)
		f = nil
		return
	}
	filePos := fset.File(f.Package)
	return f, filePos.Pos(i), filePos.Pos(i + len(substr))
}
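findInterval relies on the inverse mapping: token.File.Pos converts a 0-based byte offset within the file back into a token.Pos. A hedged round-trip sketch using the standard go/token package (which, as assumed above, sgo/token mirrors):

package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"strings"
)

func main() {
	const src = "package p\nvar x = 1\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "<input>", src, 0)
	if err != nil {
		panic(err)
	}
	tf := fset.File(f.Package)         // *token.File for "<input>"
	off := strings.Index(src, "x")     // 0-based byte offset of "x"
	pos := tf.Pos(off)                 // offset -> token.Pos
	fmt.Println(fset.Position(pos))    // <input>:2:5
	fmt.Println(tf.Offset(pos) == off) // Offset is the inverse of Pos: true
}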
Example 3: sortImports
// sortImports sorts runs of consecutive import lines in import blocks in f.
// It also removes duplicate imports when it is possible to do so without data loss.
func sortImports(fset *token.FileSet, f *ast.File) {
	for i, d := range f.Decls {
		d, ok := d.(*ast.GenDecl)
		if !ok || d.Tok != token.IMPORT {
			// Not an import declaration, so we're done.
			// Imports are always first.
			break
		}

		if len(d.Specs) == 0 {
			// Empty import block, remove it.
			f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
		}

		if !d.Lparen.IsValid() {
			// Not a block: sorted by default.
			continue
		}

		// Identify and sort runs of specs on successive lines.
		i := 0
		specs := d.Specs[:0]
		for j, s := range d.Specs {
			if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
				// j begins a new run. End this one.
				specs = append(specs, sortSpecs(fset, f, d.Specs[i:j])...)
				i = j
			}
		}
		specs = append(specs, sortSpecs(fset, f, d.Specs[i:])...)
		d.Specs = specs

		// Deduping can leave a blank line before the rparen; clean that up.
		if len(d.Specs) > 0 {
			lastSpec := d.Specs[len(d.Specs)-1]
			lastLine := fset.Position(lastSpec.Pos()).Line
			if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 {
				fset.File(d.Rparen).MergeLine(rParenLine - 1)
			}
		}
	}
}
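Both here and in the next example, `fset.File(pos).MergeLine(line)` is how a now-redundant line break is deleted: MergeLine merges the given line with the following one, so positions that used to report line+1 report line instead, and no blank hole is printed later. A minimal, hedged illustration with the standard go/token package (which, as assumed above, sgo/token mirrors):

package main

import (
	"fmt"
	"go/token"
)

func main() {
	const src = "package p\n\nvar x int\n"
	fset := token.NewFileSet()
	tf := fset.AddFile("p.go", -1, len(src))
	tf.SetLinesForContent([]byte(src)) // record the line starts found in src

	varPos := tf.Pos(len("package p\n\n")) // position of "var" on line 3
	fmt.Println(fset.Position(varPos))     // p.go:3:1

	tf.MergeLine(2)                    // merge the blank line 2 into line 3
	fmt.Println(fset.Position(varPos)) // p.go:2:2 -- the line break is gone
}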
Example 4: sortSpecs
func sortSpecs(fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec {
	// Can't short-circuit here even if specs are already sorted,
	// since they might yet need deduplication.
	// A lone import, however, may be safely ignored.
	if len(specs) <= 1 {
		return specs
	}

	// Record positions for specs.
	pos := make([]posSpan, len(specs))
	for i, s := range specs {
		pos[i] = posSpan{s.Pos(), s.End()}
	}

	// Identify comments in this range.
	// Any comment from pos[0].Start to the final line counts.
	lastLine := fset.Position(pos[len(pos)-1].End).Line
	cstart := len(f.Comments)
	cend := len(f.Comments)
	for i, g := range f.Comments {
		if g.Pos() < pos[0].Start {
			continue
		}
		if i < cstart {
			cstart = i
		}
		if fset.Position(g.End()).Line > lastLine {
			cend = i
			break
		}
	}
	comments := f.Comments[cstart:cend]

	// Assign each comment to the import spec preceding it.
	importComment := map[*ast.ImportSpec][]*ast.CommentGroup{}
	specIndex := 0
	for _, g := range comments {
		for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() {
			specIndex++
		}
		s := specs[specIndex].(*ast.ImportSpec)
		importComment[s] = append(importComment[s], g)
	}

	// Sort the import specs by import path.
	// Remove duplicates, when possible without data loss.
	// Reassign the import paths to have the same position sequence.
	// Reassign each comment to abut the end of its spec.
	// Sort the comments by new position.
	sort.Sort(byImportSpec(specs))

	// Dedup. Thanks to our sorting, we can just consider
	// adjacent pairs of imports.
	deduped := specs[:0]
	for i, s := range specs {
		if i == len(specs)-1 || !collapse(s, specs[i+1]) {
			deduped = append(deduped, s)
		} else {
			p := s.Pos()
			fset.File(p).MergeLine(fset.Position(p).Line)
		}
	}
	specs = deduped

	// Fix up comment positions
	for i, s := range specs {
		s := s.(*ast.ImportSpec)
		if s.Name != nil {
			s.Name.NamePos = pos[i].Start
		}
		s.Path.ValuePos = pos[i].Start
		s.EndPos = pos[i].End
		for _, g := range importComment[s] {
			for _, c := range g.List {
				c.Slash = pos[i].End
			}
		}
	}

	sort.Sort(byCommentPos(comments))

	return specs
}
Example 5: AddNamedImport
// AddNamedImport adds the import path to the file f, if absent.
// If name is not empty, it is used to rename the import.
//
// For example, calling
// AddNamedImport(fset, f, "pathpkg", "path")
// adds
// import pathpkg "path"
func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added bool) {
	if imports(f, ipath) {
		return false
	}

	newImport := &ast.ImportSpec{
		Path: &ast.BasicLit{
			Kind:  token.STRING,
			Value: strconv.Quote(ipath),
		},
	}
	if name != "" {
		newImport.Name = &ast.Ident{Name: name}
	}

	// Find an import decl to add to.
	// The goal is to find an existing import
	// whose import path has the longest shared
	// prefix with ipath.
	var (
		bestMatch  = -1          // length of longest shared prefix
		lastImport = -1          // index in f.Decls of the file's final import decl
		impDecl    *ast.GenDecl  // import decl containing the best match
		impIndex   = -1          // spec index in impDecl containing the best match
	)
	for i, decl := range f.Decls {
		gen, ok := decl.(*ast.GenDecl)
		if ok && gen.Tok == token.IMPORT {
			lastImport = i
			// Do not add to import "C", to avoid disrupting the
			// association with its doc comment, breaking cgo.
			if declImports(gen, "C") {
				continue
			}

			// Match an empty import decl if that's all that is available.
			if len(gen.Specs) == 0 && bestMatch == -1 {
				impDecl = gen
			}

			// Compute longest shared prefix with imports in this group.
			for j, spec := range gen.Specs {
				impspec := spec.(*ast.ImportSpec)
				n := matchLen(importPath(impspec), ipath)
				if n > bestMatch {
					bestMatch = n
					impDecl = gen
					impIndex = j
				}
			}
		}
	}

	// If no import decl found, add one after the last import.
	if impDecl == nil {
		impDecl = &ast.GenDecl{
			Tok: token.IMPORT,
		}
		if lastImport >= 0 {
			impDecl.TokPos = f.Decls[lastImport].End()
		} else {
			// There are no existing imports.
			// Our new import goes after the package declaration and after
			// the comment, if any, that starts on the same line as the
			// package declaration.
			impDecl.TokPos = f.Package

			file := fset.File(f.Package)
			pkgLine := file.Line(f.Package)
			for _, c := range f.Comments {
				if file.Line(c.Pos()) > pkgLine {
					break
				}
				impDecl.TokPos = c.End()
			}
		}
		f.Decls = append(f.Decls, nil)
		copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:])
		f.Decls[lastImport+1] = impDecl
	}

	// Insert new import at insertAt.
	insertAt := 0
	if impIndex >= 0 {
		// insert after the found import
		insertAt = impIndex + 1
	}
	impDecl.Specs = append(impDecl.Specs, nil)
	copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:])
	impDecl.Specs[insertAt] = newImport
	pos := impDecl.Pos()
	if insertAt > 0 {
		// If there is a comment after an existing import, preserve the comment
//......... (the rest of this example is omitted) .........
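The doc comment above already shows the intended call. As a hedged usage sketch, a hypothetical helper in the same package as AddNamedImport could apply it to freshly parsed source and print the rewritten file (imports assumed: bytes, go/parser, go/printer, go/token; the output may still want a gofmt pass):

// demoAddImport is a hypothetical helper in the same package as AddNamedImport.
// It parses src, adds `import pathpkg "path"` if absent, and returns the result.
func demoAddImport(src string) (string, error) {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "main.go", src, parser.ParseComments)
	if err != nil {
		return "", err
	}
	AddNamedImport(fset, f, "pathpkg", "path")
	var buf bytes.Buffer
	if err := printer.Fprint(&buf, fset, f); err != nil {
		return "", err
	}
	return buf.String(), nil
}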
Example 6: DeleteNamedImport
// DeleteNamedImport deletes the import with the given name and path from the file f, if present.
func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) {
	var delspecs []*ast.ImportSpec

	// Find the import nodes that import path, if any.
	for i := 0; i < len(f.Decls); i++ {
		decl := f.Decls[i]
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT {
			continue
		}
		for j := 0; j < len(gen.Specs); j++ {
			spec := gen.Specs[j]
			impspec := spec.(*ast.ImportSpec)
			if impspec.Name == nil && name != "" {
				continue
			}
			if impspec.Name != nil && impspec.Name.Name != name {
				continue
			}
			if importPath(impspec) != path {
				continue
			}

			// We found an import spec that imports path.
			// Delete it.
			delspecs = append(delspecs, impspec)
			deleted = true
			copy(gen.Specs[j:], gen.Specs[j+1:])
			gen.Specs = gen.Specs[:len(gen.Specs)-1]

			// If this was the last import spec in this decl,
			// delete the decl, too.
			if len(gen.Specs) == 0 {
				copy(f.Decls[i:], f.Decls[i+1:])
				f.Decls = f.Decls[:len(f.Decls)-1]
				i--
				break
			} else if len(gen.Specs) == 1 {
				gen.Lparen = token.NoPos // drop parens
			}
			if j > 0 {
				lastImpspec := gen.Specs[j-1].(*ast.ImportSpec)
				lastLine := fset.Position(lastImpspec.Path.ValuePos).Line
				line := fset.Position(impspec.Path.ValuePos).Line

				// We deleted an entry but now there may be
				// a blank line-sized hole where the import was.
				if line-lastLine > 1 {
					// There was a blank line immediately preceding the deleted import,
					// so there's no need to close the hole.
					// Do nothing.
				} else {
					// There was no blank line. Close the hole.
					fset.File(gen.Rparen).MergeLine(line)
				}
			}
			j--
		}
	}

	// Delete them from f.Imports.
	for i := 0; i < len(f.Imports); i++ {
		imp := f.Imports[i]
		for j, del := range delspecs {
			if imp == del {
				copy(f.Imports[i:], f.Imports[i+1:])
				f.Imports = f.Imports[:len(f.Imports)-1]
				copy(delspecs[j:], delspecs[j+1:])
				delspecs = delspecs[:len(delspecs)-1]
				i--
				break
			}
		}
	}

	if len(delspecs) > 0 {
		panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs))
	}

	return
}
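DeleteNamedImport is the inverse of Example 5; note how its MergeLine call keeps the deleted import from leaving a blank line behind. A hedged usage sketch, mirroring the hypothetical helper shown after Example 5 and assuming the same imports:

// demoDeleteImport is a hypothetical helper in the same package as DeleteNamedImport.
// It parses src, removes `import pathpkg "path"` if present, and returns the
// rewritten source along with whether anything was deleted.
func demoDeleteImport(src string) (string, bool, error) {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "main.go", src, parser.ParseComments)
	if err != nil {
		return "", false, err
	}
	deleted := DeleteNamedImport(fset, f, "pathpkg", "path")
	var buf bytes.Buffer
	if err := printer.Fprint(&buf, fset, f); err != nil {
		return "", false, err
	}
	return buf.String(), deleted, nil
}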
Example 7: fileWithAnnotationComments
func fileWithAnnotationComments(file *ast.File, fset, oldFset *token.FileSet, src []byte) ([]byte, *ast.File, error) {
	// TODO: So this is an extremely hacky way of doing this. We're going to
	// add the comments directly to the source comments, as text, and then
	// we're going to re-parse it. This is because I tried manipulating the
	// AST, adding the comments there and shifting the nodes' positions, but
	// doing that right is very very convoluted; you need to be tracking all
	// the time where you are, where you _were_, figure out where's a line
	// break, etc. So, well, this will do for now.
	var err error
	var dstChunks [][]byte
	var lastChunkEnd int
	skipNextSpec := false
	addDoc := func(node ast.Node, name *ast.Ident, typ ast.Expr) {
		if typ == nil {
			return
		}
		if name != nil && len(name.Name) > 0 {
			c := name.Name[0]
			if !(c >= 'A' && c <= 'Z') {
				return
			}
		}
		buf := &bytes.Buffer{}
		err = printer.Fprint(buf, token.NewFileSet(), typ)
		if err != nil {
			return
		}
		pos := int(node.Pos()) - oldFset.File(file.Pos()).Base()
		var space []byte
		for i := pos - 1; i >= 0 && (src[i] == ' ' || src[i] == '\t'); i-- {
			space = append([]byte{src[i]}, space...)
		}
		text := append([]byte("// For SGo: "+buf.String()+"\n"), space...)
		dstChunks = append(dstChunks, src[lastChunkEnd:pos], text)
		lastChunkEnd = pos
	}
	var visitor visitorFunc
	visitor = visitorFunc(func(node ast.Node) (w ast.Visitor) {
		var typ ast.Expr
		var name *ast.Ident
		switch node := node.(type) {
		case *ast.FuncDecl:
			typ = node.Type
			name = node.Name
		case *ast.GenDecl:
			if node.Lparen != 0 || node.Tok == token.IMPORT || node.Tok == token.CONST {
				return visitor
			}
			switch spec := node.Specs[0].(type) {
			case *ast.TypeSpec:
				skipNextSpec = true
				typ = spec.Type
				name = spec.Name
			case *ast.ValueSpec:
				skipNextSpec = true
				typ = spec.Type
				if len(spec.Names.List) > 0 {
					name = spec.Names.List[0]
				}
			}
			switch typ.(type) {
			case *ast.InterfaceType, *ast.StructType:
				return visitor
			}
		case *ast.InterfaceType:
			for i := 0; i < len(node.Methods.List); i++ {
				item := node.Methods.List[i]
				if len(item.Names) > 0 {
					name = item.Names[0]
				}
				addDoc(item, name, item.Type)
			}
			return visitor
		case *ast.StructType:
			for i := 0; i < len(node.Fields.List); i++ {
				item := node.Fields.List[i]
				if len(item.Names) > 0 {
					name = item.Names[0]
				}
				addDoc(item, name, item.Type)
			}
			return visitor
		case *ast.TypeSpec:
			if skipNextSpec {
				skipNextSpec = false
				return visitor
			}
			typ = node.Type
			name = node.Name
		case *ast.ValueSpec:
			if skipNextSpec {
				skipNextSpec = false
				return visitor
			}
			typ = node.Type
			if len(node.Names.List) > 0 {
				name = node.Names.List[0]
			}
		default:
			return visitor
//......... (the rest of this example is omitted) .........