This article collects typical usage examples of the Golang function github.com/araddon/gou.Debug. If you are wondering what the Debug function does, how to call it, or want to see it used in real code, the curated examples below should help.
The following shows 15 code examples of the Debug function, sorted by popularity by default.
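Before the examples, here is a minimal, self-contained sketch of how gou's logging is typically wired up. It is an illustrative assumption based on the package's logging helpers (SetupLogging, Debug, Debugf), not code taken from the examples below:

package main

import (
	u "github.com/araddon/gou"
)

func main() {
	// Assumed setup: gou log output is suppressed until a logger/level is configured.
	u.SetupLogging("debug")

	// Debug prints its arguments, log.Println style.
	u.Debug("starting up, port=", 9200)

	// Debugf is the printf-style variant for format strings.
	u.Debugf("connected to %s:%d", "localhost", 9200)
}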
Example 1: XXXTestBulkErrors
func XXXTestBulkErrors(t *testing.T) {
	// let's set a bad port, and hope we get a conn refused error?
	c := NewTestConn()
	c.Port = "27845"
	defer func() {
		c.Port = "9200"
	}()
	indexer := c.NewBulkIndexerErrors(10, 1)
	indexer.Start()
	errorCt := 0
	go func() {
		for i := 0; i < 20; i++ {
			date := time.Unix(1257894000, 0)
			data := map[string]interface{}{"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0)}
			indexer.Index("users", "user", strconv.Itoa(i), "", &date, data, true)
		}
	}()
	var errBuf *ErrorBuffer
	for errBuf = range indexer.ErrorChannel {
		errorCt++
		break
	}
	if errBuf.Buf.Len() > 0 {
		gou.Debug(errBuf.Err)
	}
	assert.T(t, errorCt > 0, fmt.Sprintf("ErrorCt should be > 0 %d", errorCt))
	indexer.Stop()
}
Example 2: FluentdFormatter
// Fluentd format [date source jsonmessage] parser
func FluentdFormatter(logstashType string, tags []string) LineTransform {
	return func(d *LineEvent) *Event {
		//2012-11-22 05:07:51 +0000 lio.home.ubuntu.log.collect.log.vm2: {"message":"runtime error: close of closed channel"}
		if lineParts := bytes.SplitN(d.Data, []byte{':', ' '}, 2); len(lineParts) > 1 {
			if len(lineParts[0]) > 26 {
				u.Debug("%s %s\n", string(lineParts[0]), string(lineParts[1]))
				bsrc := lineParts[0][26:]
				bdate := lineParts[0][0:25]
				var msg map[string]interface{}
				if err := json.Unmarshal(lineParts[1], &msg); err == nil {
					if t, err := time.Parse("2006-01-02 15:04:05 -0700", string(bdate)); err == nil {
						evt := NewTsEvent(logstashType, string(bsrc), "", t)
						if msgi, ok := msg["message"]; ok {
							if msgS, ok := msgi.(string); ok {
								evt.Message = msgS
								delete(msg, "message")
							}
						}
						evt.Tags = tags
						evt.Fields = msg
						return evt
					} else {
						u.Debug("%v", err)
						return NewEvent(logstashType, string(bsrc), string(lineParts[1]))
					}
				} else {
					u.Warn("bad message? %v", err)
				}
			}
		}
		return nil
	}
}
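For context, here is a hedged usage sketch of the transform returned above. The stream type name "applog" and the sample log line are made up, and it assumes the surrounding package's LineEvent/Event types exactly as used in the example:

// Hypothetical caller; Data is the only LineEvent field assumed here.
func ExampleFluentdFormatter() {
	transform := FluentdFormatter("applog", []string{"vm2"})
	raw := []byte(`2012-11-22 05:07:51 +0000 lio.home.ubuntu.log.collect.log.vm2: {"message":"runtime error: close of closed channel"}`)
	if evt := transform(&LineEvent{Data: raw}); evt != nil {
		u.Debug(evt.Message, evt.Fields)
	}
}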
Example 3: TestBulkErrors
func TestBulkErrors(t *testing.T) {
	// let's set a bad port, and hope we get a connection refused error?
	api.Port = "27845"
	defer func() {
		api.Port = "9200"
	}()
	BulkDelaySeconds = 1
	indexer := NewBulkIndexerErrors(10, 1)
	done := make(chan bool)
	indexer.Run(done)
	errorCt := 0
	go func() {
		for i := 0; i < 20; i++ {
			date := time.Unix(1257894000, 0)
			data := map[string]interface{}{"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0)}
			indexer.Index("users", "user", strconv.Itoa(i), "", &date, data)
		}
	}()
	for errBuf := range indexer.ErrorChannel {
		errorCt++
		gou.Debug(errBuf.Err)
		break
	}
	assert.T(t, errorCt > 0, fmt.Sprintf("ErrorCt should be > 0 %d", errorCt))
	done <- true
}
Example 4: MakeCustomHandler
func MakeCustomHandler(msgsOut chan *LineEvent) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		qs := r.URL.Query()
		stream := qs.Get("stream")
		if len(stream) < 1 {
			stream = qs.Get(":stream")
			if len(stream) < 1 {
				io.WriteString(w, "Requires a 'stream' qs param ")
				return
			} else {
				qs.Del(":stream")
			}
		}
		var data []byte
		var err error
		if r.Body != nil {
			data, err = ioutil.ReadAll(r.Body)
			defer r.Body.Close()
			if err != nil {
				u.Log(u.ERROR, err.Error())
				io.WriteString(w, "Requires valid monit parse")
				return
			}
		} else {
			data = []byte(qs.Encode())
		}
		u.Debug(stream, string(data))
		//u.Error("Not implemented")
		msgsOut <- &LineEvent{Data: data, Source: stream}
	}
}
Example 5: verifyTokenTypes
func verifyTokenTypes(t *testing.T, sql string, tt []TokenType) {
	l := NewSqlLexer(sql)
	u.Debug(sql)
	for _, tokenType := range tt {
		tok := l.NextToken()
		//u.Infof("%#v expects:%v", tok, tokenType)
		assert.Equalf(t, tok.T, tokenType, "want='%v' has %v ", tokenType, tok.T)
	}
}
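A hedged example of driving the helper above; the SQL string and the expected token sequence are illustrative assumptions (the token constants themselves appear in other examples on this page):

// Hypothetical test; the exact sequence the lexer emits is an assumption.
func TestSelectTokens(t *testing.T) {
	verifyTokenTypes(t, "SELECT name FROM users",
		[]TokenType{TokenSelect, TokenIdentity, TokenFrom, TokenIdentity})
}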
Example 6: parseSelect
// First keyword was SELECT, so use the SELECT parser rule-set
func (m *FilterQLParser) parseSelect() (*FilterStatement, error) {
	req := NewFilterStatement()
	req.Raw = m.l.RawInput()
	m.Next() // Consume the SELECT
	if m.Cur().T != lex.TokenStar && m.Cur().T != lex.TokenMultiply {
		u.Warnf("token? %v", m.Cur())
		return nil, fmt.Errorf("Must use SELECT * currently %s", req.Raw)
	}
	m.Next() // Consume *
	// OPTIONAL From clause
	if m.Cur().T == lex.TokenFrom {
		m.Next()
		if m.Cur().T == lex.TokenIdentity || m.Cur().T == lex.TokenTable {
			req.From = m.Cur().V
			m.Next()
		}
	}
	if m.Cur().T != lex.TokenWhere {
		return nil, fmt.Errorf("Must use SELECT * FROM [table] WHERE: %s", req.Raw)
	}
	req.Keyword = m.Cur().T
	m.Next() // Consume WHERE
	// one top level filter which may be nested
	if err := m.parseWhereExpr(req); err != nil {
		u.Debug(err)
		return nil, err
	}
	// LIMIT
	if err := m.parseLimit(req); err != nil {
		return nil, err
	}
	// ALIAS
	if err := m.parseAlias(req); err != nil {
		return nil, err
	}
	if m.Cur().T == lex.TokenEOF || m.Cur().T == lex.TokenEOS || m.Cur().T == lex.TokenRightParenthesis {
		// if err := req.Finalize(); err != nil {
		// 	u.Errorf("Could not finalize: %v", err)
		// 	return nil, err
		// }
		// we are good
		return req, nil
	}
	u.Warnf("Could not complete parsing, return error: %v %v", m.Cur(), m.l.PeekWord())
	return nil, fmt.Errorf("Did not complete parsing input: %v", m.LexTokenPager.Cur().V)
}
Example 7: TestToSql
// We need to be able to re-write queries, since during joins we have
// to re-write the query we are going to send to a single data source
func TestToSql(t *testing.T) {
	for _, sqlStrIn := range sqlStrings {
		u.Debug("parsing next one ", sqlStrIn)
		stmt1 := parseOrPanic(t, sqlStrIn)
		sqlSel1 := stmt1.(*SqlSelect)
		sqlRt := sqlSel1.StringAST()
		u.Warnf("About to parse roundtrip \n%v", sqlRt)
		stmt2 := parseOrPanic(t, sqlRt)
		compareAst(t, stmt1, stmt2)
	}
}
Example 8: StdinPruducer
// sends messages from stdin for consumption
func StdinPruducer(msgChan chan *LineEvent) {
	b := bufio.NewReader(os.Stdin)
	lineHandler := MakeFileFlattener("stdin", msgChan)
	u.Debug("reading from stdin with lines defined by newline")
	for {
		if s, e := b.ReadString('\n'); e == nil {
			//u.Info(s)
			lineHandler(s)
		} else if e == io.EOF {
			return
		}
	}
}
Example 9: parse
// parse the request
func (m *Parser) parse() (*Ast, error) {
	comment := m.initialComment()
	u.Debug(comment)
	// Now, find First Keyword
	switch m.curToken.T {
	case lex.TokenSelect:
		m.initialKeyword = m.curToken
		return m.parseSelect(comment)
	default:
		return nil, fmt.Errorf("Unrecognized query, expected [SELECT] influx ql")
	}
	u.Warnf("Whoops, that didn't work: \n%v \n\t%v", m.curToken, m.qryText)
	return nil, fmt.Errorf("Unknown error on request")
}
Example 10: TestBulkUpdate
func TestBulkUpdate(t *testing.T) {
	InitTests(true)
	api.Port = "9200"
	indexer := NewBulkIndexer(3)
	indexer.BulkSendor = func(buf *bytes.Buffer) error {
		messageSets += 1
		totalBytesSent += buf.Len()
		buffers = append(buffers, buf)
		gou.Debug(string(buf.Bytes()))
		return BulkSend(buf)
	}
	done := make(chan bool)
	indexer.Run(done)
	date := time.Unix(1257894000, 0)
	user := map[string]interface{}{
		"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0), "count": 1,
	}
	// Let's make sure the data is in the index ...
	_, err := Index(true, "users", "user", "5", user)
	// script and params
	data := map[string]interface{}{
		"script": "ctx._source.count += 2",
	}
	err = indexer.Update("users", "user", "5", "", &date, data)
	// So here's the deal. Flushing does seem to work, you just have to give the
	// channel a moment to receive the message ...
	// <- time.After(time.Millisecond * 20)
	// indexer.Flush()
	done <- true
	WaitFor(func() bool {
		return len(buffers) > 0
	}, 5)
	assert.T(t, BulkErrorCt == 0 && err == nil, fmt.Sprintf("Should not have any errors %v", err))
	response, err := Get(true, "users", "user", "5")
	assert.T(t, err == nil, fmt.Sprintf("Should not have any errors %v", err))
	newCount := response.Source.(map[string]interface{})["count"]
	assert.T(t, newCount.(float64) == 3,
		fmt.Sprintf("Should have update count: %#v ... %#v", response.Source.(map[string]interface{})["count"], response))
}
Example 11: Clone
// Initial Creation of this repo
func (s *Git) Clone(d *Dep) error {
	if !d.exists {
		// new, initial clone?
		// git@github.com:lytics/cache.git
		parts := strings.Split(d.Src, "/")
		// 0: github.com  1: lytics  2: cache
		if len(parts) < 3 {
			return fmt.Errorf("Invalid src? %s", d.Src)
		}
		gitPath := fmt.Sprintf("git@%s:%s/%s.git", parts[0], parts[1], parts[2])
		u.Warnf("cloning src? %s", gitPath)
		cmdgit := exec.Command("git", "clone", gitPath)
		cmdgit.Dir = d.ParentDir()
		out, err := cmdgit.Output()
		u.Debug(string(out), err)
		return err
	}
	return nil
}
Example 12: checkClean
func (d Dependencies) checkClean(allowNonClean bool) bool {
	var wg sync.WaitGroup
	hasErrors := false
	for _, dep := range d {
		wg.Add(1)
		go func(depIn *Dep) {
			depIn.createPath()
			// generally we are going to force clean on all directories unless overridden
			if !allowNonClean {
				if !depIn.Clean() {
					u.Debug(depIn)
					hasErrors = true
				}
			}
			wg.Done()
		}(dep)
	}
	wg.Wait()
	return hasErrors
}
Example 13: TailFile
func TailFile(filename string, config tail.Config, done chan bool, msgChan chan *LineEvent) {
	u.Debug("Watching file ", filename, config)
	t, err := tail.TailFile(filename, config)
	if err != nil {
		u.Error(err)
		return
	}
	//defer func() { done <- true }()
	lineHandler := MakeFileFlattener(filename, msgChan)
	for line := range t.Lines {
		lineHandler(line.Text)
	}
	err = t.Wait()
	if err != nil {
		u.Error(err)
	}
	if err := t.Stop(); err != nil {
		u.Info(err)
	}
}
Example 14: TestBulkIndexerBasic
func TestBulkIndexerBasic(t *testing.T) {
	InitTests(true)
	indexer := NewBulkIndexer(3)
	indexer.BulkSendor = func(buf *bytes.Buffer) error {
		messageSets += 1
		totalBytesSent += buf.Len()
		buffers = append(buffers, buf)
		gou.Debug(string(buf.Bytes()))
		return BulkSend(buf)
	}
	done := make(chan bool)
	indexer.Run(done)
	date := time.Unix(1257894000, 0)
	data := map[string]interface{}{"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0)}
	err := indexer.Index("users", "user", "1", "", &date, data)
	WaitFor(func() bool {
		return len(buffers) > 0
	}, 5)
	// part of the request is the url, so let's factor that in
	//totalBytesSent = totalBytesSent - len(*eshost)
	assert.T(t, len(buffers) == 1, fmt.Sprintf("Should have sent one operation but was %d", len(buffers)))
	assert.T(t, BulkErrorCt == 0 && err == nil, fmt.Sprintf("Should not have any errors %v", err))
	assert.T(t, totalBytesSent == 145, fmt.Sprintf("Should have sent 145 bytes but was %v", totalBytesSent))
	err = indexer.Index("users", "user", "2", "", nil, data)
	<-time.After(time.Millisecond * 10) // we need to wait for doc to hit send channel
	// this will test to ensure that Flush actually catches a doc
	indexer.Flush()
	totalBytesSent = totalBytesSent - len(*eshost)
	assert.T(t, err == nil, fmt.Sprintf("Should have nil error =%v", err))
	assert.T(t, len(buffers) == 2, fmt.Sprintf("Should have another buffer ct=%d", len(buffers)))
	assert.T(t, BulkErrorCt == 0, fmt.Sprintf("Should not have any errors %d", BulkErrorCt))
	assert.T(t, CloseInt(totalBytesSent, 257), fmt.Sprintf("Should have sent 257 bytes but was %v", totalBytesSent))
	done <- true
}
Example 15: parseSqlSelect
// First keyword was SELECT, so use the SELECT parser rule-set
func (m *Sqlbridge) parseSqlSelect() (*SqlSelect, error) {
	req := NewSqlSelect()
	req.Raw = m.l.RawInput()
	m.Next() // Consume Select?
	// Optional DISTINCT keyword always immediately after SELECT KW
	if m.Cur().T == lex.TokenDistinct {
		m.Next()
		req.Distinct = true
	}
	// columns
	//if m.Cur().T != lex.TokenStar {
	if err := parseColumns(m, m.funcs, m.buildVm, req); err != nil {
		u.Debug(err)
		return nil, err
	}
	// } else if err := m.parseSelectStar(req); err != nil {
	// 	u.Debug(err)
	// 	return nil, err
	// }
	//u.Debugf("cur? %v", m.Cur())
	// select @@myvar limit 1
	if m.Cur().T == lex.TokenLimit {
		if err := m.parseLimit(req); err != nil {
			return req, nil
		}
		if m.isEnd() {
			return req, nil
		}
	}
	// SPECIAL END CASE for simple selects
	// SELECT last_insert_id();
	if m.Cur().T == lex.TokenEOS || m.Cur().T == lex.TokenEOF {
		// valid end
		return req, nil
	}
	// INTO
	if errreq := m.parseInto(req); errreq != nil {
		return nil, errreq
	}
	// FROM
	if errreq := m.parseSources(req); errreq != nil {
		return nil, errreq
	}
	// WHERE
	if errreq := m.parseWhereSelect(req); errreq != nil {
		return nil, errreq
	}
	// GROUP BY
	//u.Debugf("GroupBy? : %v", m.Cur())
	if errreq := m.parseGroupBy(req); errreq != nil {
		return nil, errreq
	}
	// HAVING
	//u.Debugf("Having? : %v", m.Cur())
	if errreq := m.parseHaving(req); errreq != nil {
		return nil, errreq
	}
	// ORDER BY
	//u.Debugf("OrderBy? : %v", m.Cur())
	if errreq := m.parseOrderBy(req); errreq != nil {
		return nil, errreq
	}
	// LIMIT
	if err := m.parseLimit(req); err != nil {
		return nil, err
	}
	// WITH
	with, err := ParseWith(m.SqlTokenPager)
	if err != nil {
		return nil, err
	}
	req.With = with
	// ALIAS
	if err := m.parseAlias(req); err != nil {
		return nil, err
	}
	if m.Cur().T == lex.TokenEOF || m.Cur().T == lex.TokenEOS || m.Cur().T == lex.TokenRightParenthesis {
		if err := req.Finalize(); err != nil {
			u.Errorf("Could not finalize: %v", err)
			return nil, err
		}
		// we are good
//......... the remainder of this example is omitted here .........