This article collects typical usage examples of the Scanner.Init method from Golang's go/scanner package. If you have been wondering what Scanner.Init does, how to use it, or where to find examples of it, the curated method examples below may help. You can also explore further examples of the containing type, go/scanner.Scanner.
The following shows 15 code examples of Scanner.Init, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
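For reference, Scanner.Init has the signature Init(file *token.File, src []byte, err scanner.ErrorHandler, mode scanner.Mode): the *token.File must be registered with a token.FileSet and its size must equal len(src), err may be nil, and mode selects extras such as scanner.ScanComments. Before the collected examples, here is a minimal, self-contained sketch (not one of the examples below):

package main

import (
	"fmt"
	"go/scanner"
	"go/token"
)

func main() {
	src := []byte("x := 1 // one")
	fset := token.NewFileSet()
	file := fset.AddFile("demo.go", fset.Base(), len(src)) // file size must match len(src)
	var s scanner.Scanner
	s.Init(file, src, nil /* no error handler */, scanner.ScanComments)
	for {
		pos, tok, lit := s.Scan()
		if tok == token.EOF {
			break
		}
		fmt.Printf("%s\t%s\t%q\n", fset.Position(pos), tok, lit)
	}
}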
Example 1: NewToken
func NewToken(src string, cursor int) tokenIterator {
	src = src[:cursor] + ";" + src[cursor:]
	tokens := make([]tokenItem, 0, 1000)
	var s scanner.Scanner
	fset := token.NewFileSet()
	file := fset.AddFile("", fset.Base(), len(src))
	s.Init(file, []byte(src), nil, 0)
	index := 0
	for {
		pos, tok, lit := s.Scan()
		if tok == token.EOF {
			break
		}
		off := fset.Position(pos).Offset
		tokens = append(tokens, tokenItem{
			off: off,
			tok: tok,
			lit: lit,
		})
		if cursor > off {
			index++
		}
	}
	return tokenIterator{
		tokens: tokens,
		index:  index,
	}
}
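Example 1 splices a ";" into the source at the cursor so the scanner is forced to end a token exactly at the completion point; index then counts how many tokens start before the cursor. A hedged usage sketch, assuming the tokenIterator and tokenItem types defined elsewhere in the same package:

it := NewToken("foo.bar", 4) // cursor sits right after "foo."
// it.tokens now holds every token of the modified source "foo.;bar",
// and it.index is the number of tokens that begin before offset 4.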
Example 2: formatCode
func formatCode(src []byte, annotations []doc.TypeAnnotation) string {
	// Collect comment positions in type annotations with Name == "".
	var (
		comments []doc.TypeAnnotation
		s        scanner.Scanner
	)
	fset := token.NewFileSet()
	file := fset.AddFile("", fset.Base(), len(src))
	s.Init(file, src, nil, scanner.ScanComments)
commentLoop:
	for {
		pos, tok, lit := s.Scan()
		switch tok {
		case token.EOF:
			break commentLoop
		case token.COMMENT:
			p := file.Offset(pos)
			comments = append(comments, doc.TypeAnnotation{Pos: p, End: p + len(lit)})
		}
	}
	// Merge type annotations and comments without modifying the caller's
	// slice of annotations.
	switch {
	case len(comments) == 0:
		// nothing to do
	case len(annotations) == 0:
		annotations = comments
	default:
		annotations = append(comments, annotations...)
		sort.Sort(sortByPos(annotations))
	}
	var buf bytes.Buffer
	last := 0
	for _, a := range annotations {
		template.HTMLEscape(&buf, src[last:a.Pos])
		if a.Name != "" {
			p := a.ImportPath
			if p != "" {
				p = "/" + p
			}
			buf.WriteString(`<a href="`)
			buf.WriteString(urlFn(p))
			buf.WriteByte('#')
			buf.WriteString(urlFn(a.Name))
			buf.WriteString(`">`)
			template.HTMLEscape(&buf, src[a.Pos:a.End])
			buf.WriteString(`</a>`)
		} else {
			buf.WriteString(`<span class="com">`)
			template.HTMLEscape(&buf, src[a.Pos:a.End])
			buf.WriteString(`</span>`)
		}
		last = a.End
	}
	template.HTMLEscape(&buf, src[last:])
	return buf.String()
}
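formatCode depends on two helpers that are not shown here: urlFn and sortByPos. A plausible sortByPos, assuming doc.TypeAnnotation carries the integer Pos/End offsets used above, is the usual sort.Interface boilerplate:

type sortByPos []doc.TypeAnnotation

func (p sortByPos) Len() int           { return len(p) }
func (p sortByPos) Less(i, j int) bool { return p[i].Pos < p[j].Pos }
func (p sortByPos) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }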
Example 3: ExampleScanner_Scan
func ExampleScanner_Scan() {
	// src is the input that we want to tokenize.
	src := []byte("cos(x) + 1i*sin(x) // Euler")

	// Initialize the scanner.
	var s scanner.Scanner
	fset := token.NewFileSet()                      // positions are relative to fset
	file := fset.AddFile("", fset.Base(), len(src)) // register input "file"
	s.Init(file, src, nil /* no error handler */, scanner.ScanComments)

	// Repeated calls to Scan yield the token sequence found in the input.
	for {
		pos, tok, lit := s.Scan()
		if tok == token.EOF {
			break
		}
		fmt.Printf("%s\t%s\t%q\n", fset.Position(pos), tok, lit)
	}

	// output:
	// 1:1	IDENT	"cos"
	// 1:4	(	""
	// 1:5	IDENT	"x"
	// 1:6	)	""
	// 1:8	+	""
	// 1:10	IMAG	"1i"
	// 1:12	*	""
	// 1:13	IDENT	"sin"
	// 1:16	(	""
	// 1:17	IDENT	"x"
	// 1:18	)	""
	// 1:20	;	"\n"
	// 1:20	COMMENT	"// Euler"
}
Example 4: newScanner
// newScanner creates a new scanner that scans the given input bytes.
// Note: fset is a package-level *token.FileSet defined elsewhere, and
// scanner.ErrorVector comes from the pre-Go 1 scanner API.
func newScanner(input []byte) (*scanner.Scanner, *scanner.ErrorVector) {
	sc := new(scanner.Scanner)
	ev := new(scanner.ErrorVector)
	file := fset.AddFile("input", fset.Base(), len(input))
	sc.Init(file, input, ev, 0)
	return sc, ev
}
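Since scanner.ErrorVector no longer exists in current Go, the equivalent error collection today uses a scanner.ErrorList filled in through an ErrorHandler callback. A hedged sketch of the modernized helper:

func newScannerModern(input []byte) (*scanner.Scanner, *scanner.ErrorList) {
	sc := new(scanner.Scanner)
	errs := new(scanner.ErrorList)
	file := fset.AddFile("input", fset.Base(), len(input))
	sc.Init(file, input, func(pos token.Position, msg string) {
		errs.Add(pos, msg) // record each scan error with its position
	}, 0)
	return sc, errs
}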
Example 5: formatHistory
// formatHistory reformats the provided Go source by collapsing all lines
// and adding semicolons where required, suitable for adding to line history.
func formatHistory(input []byte) string {
	var buf bytes.Buffer
	var s scanner.Scanner
	fset := token.NewFileSet()
	file := fset.AddFile("", fset.Base(), len(input))
	s.Init(file, input, nil, 0)
	pos, tok, lit := s.Scan()
	for tok != token.EOF {
		if int(pos)-1 > buf.Len() {
			n := int(pos) - 1 - buf.Len()
			buf.WriteString(strings.Repeat(" ", n))
		}
		var semicolon bool
		if tok == token.SEMICOLON {
			semicolon = true
		} else if lit != "" {
			buf.WriteString(lit)
		} else {
			buf.WriteString(tok.String())
		}
		pos, tok, lit = s.Scan()
		if semicolon {
			switch tok {
			case token.RBRACE, token.RPAREN, token.EOF:
			default:
				buf.WriteRune(';')
			}
		}
	}
	return buf.String()
}
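The padding step works because, in a fresh FileSet, token positions start at base 1, so int(pos)-1 is the byte offset of each token in input; padding the buffer up to that offset keeps tokens at roughly their original offsets, with newlines effectively replaced by the semicolons written in their place. A hedged usage sketch:

h := formatHistory([]byte("x := 1\ny := 2"))
// h is now something like "x := 1;y := 2": the newline has been
// replaced by the scanner's automatically inserted semicolon.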
Example 6: Update
func (this *tokenizedGoContent) Update() {
	content := this.GetSources()[0].(caret.MultilineContentI)
	this.segments = nil
	src := []byte(content.Content())
	//src := []byte(w.Content.Content()[w.Content.Line(beginLineIndex).Start:w.Content.Line(endLineIndex).Start])
	var s scanner.Scanner
	fset := token.NewFileSet()
	file := fset.AddFile("", fset.Base(), len(src))
	s.Init(file, src, nil, scanner.ScanComments)
	// Repeated calls to Scan yield the token sequence found in the input.
	// TODO: Perhaps include whitespace in between tokens?
	for {
		pos, tok, lit := s.Scan()
		if tok == token.EOF {
			break
		}
		offset := uint32(fset.Position(pos).Offset)
		this.segments = append(this.segments, tokLit{offset: offset, tok: tok, lit: lit})
	}
	// HACK: Fake last element.
	this.segments = append(this.segments, tokLit{offset: uint32(content.LenContent())})
}
Example 7: PipeInit
// PipeInit initializes a pipeline; input will be read from iReader.
func PipeInit(iReader io.Reader) *Pipe {
	input := make(chan Token)
	output := make(chan string)
	sync := make(chan interface{})
	p := &Pipe{input, output, sync}
	src, err := ioutil.ReadAll(iReader)
	if err != nil {
		panic(err)
	}
	fset := token.NewFileSet()
	file := fset.AddFile("<stdin>", fset.Base(), len(src))
	s := scanner.Scanner{}
	s.Init(file, src, nil, scanner.ScanComments)
	go func() {
		pos, tok, str := s.Scan()
		for tok != token.EOF {
			if len(str) == 0 {
				str = tok.String()
			}
			if tok == token.COMMENT {
				str = str + "\n"
			}
			input <- Token{fset.Position(pos), tok, str}
			<-sync // wait for sent token to land
			pos, tok, str = s.Scan()
		}
		close(input)
	}()
	return p
}
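Note the handshake: after sending each token, the goroutine blocks on the sync channel until the consumer acknowledges, so scanning proceeds in lock-step with consumption. A hedged consumer sketch (the input and sync field names of Pipe are assumptions here, since the struct definition is not shown):

for tok := range p.input {
	fmt.Println(tok) // stand-in for whatever the pipeline stage does
	p.sync <- nil    // acknowledge, letting the scanner goroutine continue
}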
Example 8: Scan
// Scan scans the specified Go source file and returns a channel with Token.
//
// The EOF token is not returned, and the last token does not contain the "\n"
// character.
func Scan(name string, input []byte) chan *Token {
	var s scanner.Scanner
	fset := token.NewFileSet()
	file := fset.AddFile(name, fset.Base(), len(input))
	tokens := make(chan *Token)
	out := make(chan *Token)
	s.Init(file, input, nil, scanner.ScanComments)
	l := lexer{
		input:  string(input),
		file:   file,
		s:      s,
		tokens: tokens,
		out:    out,
	}
	// In the first stage we collect tokens, their literal code and their
	// offset in the source code.
	go l.run1()
	// In the second stage we add white space after each token.
	go l.run2()
	return out
}
Example 9: injectImport
func injectImport(src string) string {
	const inj = `
import __yyfmt__ "fmt"
`
	fset := token.NewFileSet()
	file := fset.AddFile("", -1, len(src)) // a negative base means fset.Base()
	var s scanner.Scanner
	s.Init(
		file,
		[]byte(src),
		nil,
		scanner.ScanComments,
	)
	for {
		switch _, tok, _ := s.Scan(); tok {
		case token.EOF:
			return inj + src
		case token.PACKAGE:
			s.Scan() // ident
			pos, _, _ := s.Scan()
			ofs := file.Offset(pos)
			return src[:ofs] + inj + src[ofs:]
		}
	}
}
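The token.PACKAGE branch relies on the scanner's automatic semicolon insertion: after package and its identifier, the next scanned token is the implicit semicolon at the end of the package clause, so ofs lands right after package <name> and the import is spliced in there. A hedged usage sketch:

fmt.Println(injectImport("package main\n\nfunc main() {}\n"))
// Prints roughly:
// package main
// import __yyfmt__ "fmt"
//
// func main() {}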
Example 10: expectedErrors
// expectedErrors collects the regular expressions of ERROR comments found
// in files and returns them as a map of error positions to error messages.
//
func expectedErrors(t *testing.T, filename string, src []byte) map[token.Pos]string {
	errors := make(map[token.Pos]string)
	var s scanner.Scanner
	// file was parsed already - do not add it again to the file
	// set otherwise the position information returned here will
	// not match the position information collected by the parser
	s.Init(getFile(filename), src, nil, scanner.ScanComments)
	var prev token.Pos // position of last non-comment, non-semicolon token
	for {
		pos, tok, lit := s.Scan()
		switch tok {
		case token.EOF:
			return errors
		case token.COMMENT:
			s := errRx.FindStringSubmatch(lit)
			if len(s) == 2 {
				errors[prev] = string(s[1])
			}
		default:
			prev = pos
		}
	}
	panic("unreachable")
}
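errRx is a package-level regular expression defined elsewhere in the test file. Given the len(s) == 2 check (the full match plus exactly one capture group), a plausible definition in the style of the Go parser tests would be:

var errRx = regexp.MustCompile(`^/\* *ERROR *"([^"]*)" *\*/$`)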
Example 11: main2
func main2() {
	filename := "/Users/obaskakov/IdeaProjects/goCrazy/code.scm"
	src, err := ioutil.ReadFile(filename)
	check(err)
	//	src := []byte("cos(x) + 1i*sin(x) // Euler")

	// Initialize the scanner.
	var s scanner.Scanner
	fset := token.NewFileSet()
	file := fset.AddFile("", fset.Base(), len(src)) // register input "file"
	s.Init(file, src, nil /* no error handler */, scanner.ScanComments)
	// Repeated calls to Scan yield the token sequence found in the input.
	for {
		_, tok, lit := s.Scan()
		if tok == token.EOF {
			break
		}
		fmt.Printf("\t%s %q\n", tok, lit)
		//	fmt.Printf("%s\t%s\t%q\n", fset.Position(pos), tok, lit)
	}
}
Example 12: expectedErrors
// expectedErrors collects the regular expressions of ERROR comments found
// in files and returns them as a map of error positions to error messages.
//
func expectedErrors(t *testing.T, testname string, files map[string]*ast.File) map[token.Pos]string {
	errors := make(map[token.Pos]string)
	for filename := range files {
		src, err := ioutil.ReadFile(filename)
		if err != nil {
			t.Fatalf("%s: could not read %s", testname, filename)
		}
		var s scanner.Scanner
		// file was parsed already - do not add it again to the file
		// set otherwise the position information returned here will
		// not match the position information collected by the parser
		s.Init(getFile(filename), src, nil, scanner.ScanComments)
		var prev token.Pos // position of last non-comment token
	scanFile:
		for {
			pos, tok, lit := s.Scan()
			switch tok {
			case token.EOF:
				break scanFile
			case token.COMMENT:
				s := errRx.FindStringSubmatch(lit)
				if len(s) == 2 {
					errors[prev] = string(s[1])
				}
			default:
				prev = pos
			}
		}
	}
	return errors
}
Example 13: new_token_iterator
func new_token_iterator(src []byte, cursor int) token_iterator {
	tokens := make([]token_item, 0, 1000)
	var s scanner.Scanner
	fset := token.NewFileSet()
	file := fset.AddFile("", fset.Base(), len(src))
	s.Init(file, src, nil, 0)
	token_index := 0
	for {
		pos, tok, lit := s.Scan()
		if tok == token.EOF {
			break
		}
		off := fset.Position(pos).Offset
		tokens = append(tokens, token_item{
			off: off,
			tok: tok,
			lit: lit,
		})
		if cursor > off {
			token_index++
		}
	}
	return token_iterator{
		tokens:      tokens,
		token_index: token_index,
	}
}
Example 14: expectedErrors
// expectedErrors collects the regular expressions of ERROR comments
// found in the package files of pkg and returns them in sorted order
// (by filename and position).
func expectedErrors(t *testing.T, pkg *ast.Package) (list scanner.ErrorList) {
	// scan all package files
	for filename := range pkg.Files {
		src, err := ioutil.ReadFile(filename)
		if err != nil {
			t.Fatalf("expectedErrors(%s): %v", pkg.Name, err)
		}
		var s scanner.Scanner
		file := fset.AddFile(filename, fset.Base(), len(src))
		s.Init(file, src, nil, scanner.ScanComments)
		var prev token.Pos // position of last non-comment token
	loop:
		for {
			pos, tok, lit := s.Scan()
			switch tok {
			case token.EOF:
				break loop
			case token.COMMENT:
				s := errRx.FindStringSubmatch(lit)
				if len(s) == 2 {
					list = append(list, &scanner.Error{Pos: fset.Position(prev), Msg: string(s[1])})
				}
			default:
				prev = pos
			}
		}
	}
	sort.Sort(list) // multiple files may not be sorted
	return
}
Example 15: newTokenIterator
func newTokenIterator(src []byte, cursor int) (tokenIterator, int) {
	fset := token.NewFileSet()
	file := fset.AddFile("", fset.Base(), len(src))
	cursorPos := file.Pos(cursor)
	var s scanner.Scanner
	s.Init(file, src, nil, 0)
	tokens := make([]tokenItem, 0, 1000)
	lastPos := token.NoPos
	for {
		pos, tok, lit := s.Scan()
		if tok == token.EOF || pos >= cursorPos {
			break
		}
		tokens = append(tokens, tokenItem{
			tok: tok,
			lit: lit,
		})
		lastPos = pos
	}
	return tokenIterator{
		tokens: tokens,
		pos:    len(tokens) - 1,
	}, int(cursorPos - lastPos)
}