This page collects typical usage examples of the Golang Tracef function from github.com/couchbase/indexing/secondary/logging. If you are wondering what Tracef does, how to call it, or what real code using it looks like, the curated examples below may help.
The following shows 15 code examples of the Tracef function, sorted by popularity by default.
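Before the extracted examples, here is a minimal, self-contained sketch of calling Tracef directly. It assumes only what the examples below already demonstrate: Tracef and Errorf take Printf-style format strings, and trace-level output is typically suppressed unless the package's log level is verbose enough.

package main

import (
    "github.com/couchbase/indexing/secondary/logging"
)

func main() {
    // Printf-style formatting; whether this line is actually printed depends
    // on the logging package's configured level (trace output is typically
    // off by default).
    logging.Tracef("processing request %v from %v\n", 42, "127.0.0.1:9101")

    // Errorf appears alongside Tracef in several examples below and is
    // normally always emitted.
    logging.Errorf("request %v failed: %v\n", 42, "timeout")
}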
Example 1: Receive
func Receive(conn transporter, buf []byte) (flags TransportFlag, payload []byte, err error) {
    // transport de-framing
    if err = fullRead(conn, buf[:pktDataOffset]); err != nil {
        if err == io.EOF {
            logging.Tracef("receiving packet: %v\n", err)
        } else {
            logging.Errorf("receiving packet: %v\n", err)
        }
        return
    }
    a, b := pktLenOffset, pktLenOffset+pktLenSize
    pktlen := binary.BigEndian.Uint32(buf[a:b])
    a, b = pktFlagOffset, pktFlagOffset+pktFlagSize
    flags = TransportFlag(binary.BigEndian.Uint16(buf[a:b]))
    if maxLen := uint32(len(buf)); pktlen > maxLen {
        logging.Errorf("receiving packet length %v > %v\n", pktlen, maxLen)
        err = ErrorPacketOverflow
        return
    }
    if err = fullRead(conn, buf[:pktlen]); err != nil {
        if err == io.EOF {
            logging.Tracef("receiving packet: %v\n", err)
        } else {
            logging.Errorf("receiving packet: %v\n", err)
        }
        return
    }
    return flags, buf[:pktlen], err
}
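The helper fullRead used above is not part of this excerpt. Below is a minimal sketch of what such a helper typically does; it is an assumption for illustration, not the package's actual implementation, and it presumes the transporter argument satisfies io.Reader.

import "io"

// fullRead keeps reading from conn until buf is completely filled or an
// error occurs (hypothetical stand-in for the helper referenced above).
func fullRead(conn io.Reader, buf []byte) error {
    _, err := io.ReadFull(conn, buf)
    return err
}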
Example 2: insertPrimaryIndex
func (fdb *fdbSlice) insertPrimaryIndex(key []byte, docid []byte, workerId int) {
    var err error
    logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Set Key - %s", fdb.id, fdb.idxInstId, docid)
    // check if the docid exists in the main index
    t0 := time.Now()
    if _, err = fdb.main[workerId].GetKV(key); err == nil {
        fdb.idxStats.Timings.stKVGet.Put(time.Now().Sub(t0))
        // skip
        logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Key %v Already Exists. "+
            "Primary Index Update Skipped.", fdb.id, fdb.idxInstId, string(docid))
    } else if err != nil && err != forestdb.RESULT_KEY_NOT_FOUND {
        fdb.checkFatalDbError(err)
        logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error locating "+
            "mainindex entry %v", fdb.id, fdb.idxInstId, err)
    } else if err == forestdb.RESULT_KEY_NOT_FOUND {
        // set in main index
        t0 := time.Now()
        if err = fdb.main[workerId].SetKV(key, nil); err != nil {
            fdb.checkFatalDbError(err)
            logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error in Main Index Set. "+
                "Skipped Key %s. Error %v", fdb.id, fdb.idxInstId, string(docid), err)
        }
        fdb.idxStats.Timings.stKVSet.Put(time.Now().Sub(t0))
        platform.AddInt64(&fdb.insert_bytes, int64(len(key)))
    }
}
Example 3: initBucketFilter
// initBucketFilter initializes the bucket filter
func (r *mutationStreamReader) initBucketFilter(bucketFilter map[string]*common.TsVbuuid) {
    r.syncLock.Lock()
    defer r.syncLock.Unlock()
    // allocate a new filter for the buckets which don't
    // have a filter yet
    for b, q := range r.bucketQueueMap {
        if _, ok := r.bucketFilterMap[b]; !ok {
            logging.Tracef("MutationStreamReader::initBucketFilter Added new filter "+
                "for Bucket %v Stream %v", b, r.streamId)
            // if there is non-nil filter, use that. otherwise use a zero filter.
            if filter, ok := bucketFilter[b]; ok && filter != nil {
                r.bucketFilterMap[b] = filter.Copy()
            } else {
                r.bucketFilterMap[b] = common.NewTsVbuuid(b, int(q.queue.GetNumVbuckets()))
            }
            r.bucketSyncDue[b] = false
        }
    }
    // remove the bucket filters for which bucket doesn't exist anymore
    for b, _ := range r.bucketFilterMap {
        if _, ok := r.bucketQueueMap[b]; !ok {
            logging.Tracef("MutationStreamReader::initBucketFilter Deleted filter "+
                "for Bucket %v Stream %v", b, r.streamId)
            delete(r.bucketFilterMap, b)
            delete(r.bucketSyncDue, b)
        }
    }
}
Example 4: handleEvent
func (vr *VbucketRoutine) handleEvent(m *mc.DcpEvent, seqno uint64) uint64 {
    logging.Tracef(
        traceMutFormat,
        vr.logPrefix, m.Opaque, m.Seqno, m.Opcode, string(m.Key))
    switch m.Opcode {
    case mcd.DCP_STREAMREQ: // broadcast StreamBegin
        if data := vr.makeStreamBeginData(seqno); data != nil {
            vr.broadcast2Endpoints(data)
        } else {
            fmsg := "%v ##%x StreamBeginData NOT PUBLISHED\n"
            logging.Errorf(fmsg, vr.logPrefix, m.Opaque)
        }
    case mcd.DCP_SNAPSHOT: // broadcast Snapshot
        typ, start, end := m.SnapshotType, m.SnapstartSeq, m.SnapendSeq
        logging.Tracef(ssFormat, vr.logPrefix, m.Opaque, start, end, typ)
        if data := vr.makeSnapshotData(m, seqno); data != nil {
            vr.broadcast2Endpoints(data)
        } else {
            fmsg := "%v ##%x Snapshot NOT PUBLISHED\n"
            logging.Errorf(fmsg, vr.logPrefix, m.Opaque)
        }
    case mcd.DCP_MUTATION, mcd.DCP_DELETION, mcd.DCP_EXPIRATION:
        seqno = m.Seqno // sequence number gets incremented only here
        // prepare a data for each endpoint.
        dataForEndpoints := make(map[string]interface{})
        // for each engine distribute transformations to endpoints.
        fmsg := "%v ##%x TransformRoute: %v\n"
        for _, engine := range vr.engines {
            err := engine.TransformRoute(vr.vbuuid, m, dataForEndpoints)
            if err != nil {
                logging.Errorf(fmsg, vr.logPrefix, m.Opaque, err)
                continue
            }
        }
        // send data to corresponding endpoint.
        for raddr, data := range dataForEndpoints {
            if endpoint, ok := vr.endpoints[raddr]; ok {
                // FIXME: without the coordinator doing shared topic
                // management, we will allow the feed to block.
                // Otherwise, send might fail due to ErrorChannelFull
                // or ErrorClosed
                if err := endpoint.Send(data); err != nil {
                    msg := "%v ##%x endpoint(%q).Send() failed: %v"
                    logging.Debugf(msg, vr.logPrefix, m.Opaque, raddr, err)
                    endpoint.Close()
                    delete(vr.endpoints, raddr)
                }
            }
        }
    }
    return seqno
}
Example 5: printCtrl
func (vr *VbucketRoutine) printCtrl(v interface{}) {
    switch val := v.(type) {
    case map[string]c.RouterEndpoint:
        for raddr := range val {
            fmsg := "%v ##%x knows endpoint %v\n"
            logging.Tracef(fmsg, vr.logPrefix, vr.opaque, raddr)
        }
    case map[uint64]*Engine:
        for uuid := range val {
            fmsg := "%v ##%x knows engine %v\n"
            logging.Tracef(fmsg, vr.logPrefix, vr.opaque, uuid)
        }
    }
}
Example 6: checkAndSetBucketFilter
// checkAndSetBucketFilter checks if a mutation can be processed
// based on the current filter. The filter is also updated with the new
// seqno/vbuuid if the mutation can be processed.
func (r *mutationStreamReader) checkAndSetBucketFilter(meta *MutationMeta) bool {
    r.syncLock.Lock()
    defer r.syncLock.Unlock()
    if filter, ok := r.bucketFilterMap[meta.bucket]; ok {
        // the filter only checks if the seqno of the incoming mutation is greater than
        // the existing filter. Also there should be a valid StreamBegin(vbuuid)
        // for the vbucket. The vbuuid check is only to ensure that after stream
        // restart for a bucket, mutations get processed only after StreamBegin.
        // There can be residual mutations in the projector endpoint queue after
        // a bucket gets deleted from the stream in case of multiple buckets.
        // The vbuuid doesn't get reset after StreamEnd/StreamBegin. The
        // filter can be extended for that check if required.
        if uint64(meta.seqno) > filter.Seqnos[meta.vbucket] &&
            filter.Vbuuids[meta.vbucket] != 0 {
            filter.Seqnos[meta.vbucket] = uint64(meta.seqno)
            filter.Vbuuids[meta.vbucket] = uint64(meta.vbuuid)
            r.bucketSyncDue[meta.bucket] = true
            return true
        } else {
            logging.Tracef("MutationStreamReader::checkAndSetBucketFilter Skipped "+
                "Mutation %v for Bucket %v Stream %v. Current Filter %v", meta,
                meta.bucket, r.streamId, filter.Seqnos[meta.vbucket])
            return false
        }
    } else {
        logging.Errorf("MutationStreamReader::checkAndSetBucketFilter Missing "+
            "bucket %v in Filter for Stream %v", meta.bucket, r.streamId)
        return false
    }
}
Example 7: doReceive
// doReceive receives requests from the remote; when this function returns,
// the connection is expected to be closed.
func (s *Server) doReceive(conn net.Conn, rcvch chan<- interface{}) {
    raddr := conn.RemoteAddr()
    // transport buffer for receiving
    flags := transport.TransportFlag(0).SetProtobuf()
    rpkt := transport.NewTransportPacket(s.maxPayload, flags)
    rpkt.SetDecoder(transport.EncodingProtobuf, protobuf.ProtobufDecode)
    logging.Infof("%v connection %q doReceive() ...\n", s.logPrefix, raddr)
loop:
    for {
        // TODO: Fix read timeout correctly
        // timeoutMs := s.readDeadline * time.Millisecond
        // conn.SetReadDeadline(time.Now().Add(timeoutMs))
        req, err := rpkt.Receive(conn)
        // TODO: handle close-connection and don't print error message.
        if err != nil {
            if err == io.EOF {
                logging.Tracef("%v connection %q exited %v\n", s.logPrefix, raddr, err)
            } else {
                logging.Errorf("%v connection %q exited %v\n", s.logPrefix, raddr, err)
            }
            break loop
        }
        select {
        case rcvch <- req:
        case <-s.killch:
            break loop
        }
    }
    close(rcvch)
}
Example 8: Send
func Send(conn transporter, buf []byte, flags TransportFlag, payload []byte) (err error) {
    var n int
    // transport framing
    l := pktLenSize + pktFlagSize
    if maxLen := len(buf); l > maxLen {
        logging.Errorf("sending packet length %v > %v\n", l, maxLen)
        err = ErrorPacketOverflow
        return
    }
    a, b := pktLenOffset, pktLenOffset+pktLenSize
    binary.BigEndian.PutUint32(buf[a:b], uint32(len(payload)))
    a, b = pktFlagOffset, pktFlagOffset+pktFlagSize
    binary.BigEndian.PutUint16(buf[a:b], uint16(flags))
    if n, err = conn.Write(buf[:pktDataOffset]); err == nil {
        if n, err = conn.Write(payload); err == nil && n != len(payload) {
            logging.Errorf("transport wrote only %v bytes for payload\n", n)
            err = ErrorPacketWrite
        }
        laddr, raddr := conn.LocalAddr(), conn.RemoteAddr()
        logging.Tracef("wrote %v bytes on connection %v->%v", len(payload), laddr, raddr)
    } else if n != pktDataOffset {
        logging.Errorf("transport wrote only %v bytes for header\n", n)
        err = ErrorPacketWrite
    }
    return
}
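Send and Receive (Example 1) share a fixed-size packet header whose offset constants are referenced but not defined in these excerpts. The layout below is a plausible reconstruction inferred from how the offsets are used (a 4-byte big-endian payload length followed by 2 bytes of transport flags); the actual values in the package may differ.

const (
    pktLenOffset  = 0 // payload length, uint32 big-endian
    pktLenSize    = 4
    pktFlagOffset = pktLenOffset + pktLenSize // transport flags, uint16 big-endian
    pktFlagSize   = 2
    pktDataOffset = pktFlagOffset + pktFlagSize // payload bytes start here
)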
Example 9: makeResponsehandler
func makeResponsehandler(
    client *qclient.GsiClient, conn *datastore.IndexConnection) qclient.ResponseHandler {
    entryChannel := conn.EntryChannel()
    stopChannel := conn.StopChannel()
    return func(data qclient.ResponseReader) bool {
        if err := data.Error(); err != nil {
            conn.Error(n1qlError(client, err))
            return false
        }
        skeys, pkeys, err := data.GetEntries()
        if err == nil {
            for i, skey := range skeys {
                // Primary-key is mandatory.
                e := &datastore.IndexEntry{
                    PrimaryKey: string(pkeys[i]),
                }
                e.EntryKey = skey2Values(skey)
                fmsg := "current enqueued length: %d (max %d)\n"
                l.Tracef(fmsg, len(entryChannel), cap(entryChannel))
                select {
                case entryChannel <- e:
                case <-stopChannel:
                    return false
                }
            }
            return true
        }
        conn.Error(errors.NewError(nil, err.Error()))
        return false
    }
}
Example 10: Receive
// Receive payload from remote, decode, decompress the payload and return the
// payload.
func (pkt *TransportPacket) Receive(conn transporter) (payload interface{}, err error) {
    var data []byte
    var flags TransportFlag
    flags, data, err = Receive(conn, pkt.buf)
    if err != nil {
        return
    }
    // Special packet to indicate end response
    if len(data) == 0 && flags == 0 {
        return nil, nil
    }
    pkt.flags = flags
    laddr, raddr := conn.LocalAddr(), conn.RemoteAddr()
    logging.Tracef("read %v bytes on connection %v<-%v", len(data), laddr, raddr)
    // de-compression
    if data, err = pkt.decompress(data); err != nil {
        return
    }
    // decoding
    if payload, err = pkt.decode(data); err != nil {
        return
    }
    return
}
Example 11: handleUpdateIndexPartnMap
func (s *storageMgr) handleUpdateIndexPartnMap(cmd Message) {
    logging.Tracef("StorageMgr::handleUpdateIndexPartnMap %v", cmd)
    indexPartnMap := cmd.(*MsgUpdatePartnMap).GetIndexPartnMap()
    s.indexPartnMap = CopyIndexPartnMap(indexPartnMap)
    s.supvCmdch <- &MsgSuccess{}
}
Example 12: handleUpdateIndexPartnMap
func (s *scanCoordinator) handleUpdateIndexPartnMap(cmd Message) {
    s.mu.Lock()
    defer s.mu.Unlock()
    logging.Tracef("ScanCoordinator::handleUpdateIndexPartnMap %v", cmd)
    indexPartnMap := cmd.(*MsgUpdatePartnMap).GetIndexPartnMap()
    s.indexPartnMap = CopyIndexPartnMap(indexPartnMap)
    s.supvCmdch <- &MsgSuccess{}
}
Example 13: connState
func (s *httpServer) connState(conn net.Conn, state http.ConnState) {
    raddr := conn.RemoteAddr()
    logging.Tracef("%s connState for %q : %v\n", s.logPrefix, raddr, state)
    s.mu.Lock()
    defer s.mu.Unlock()
    if s.lis != nil && state == http.StateNew {
        s.conns = append(s.conns, conn)
    }
}
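A callback like connState above is the kind of hook net/http invokes through the standard http.Server.ConnState field. The standalone sketch below shows how such a callback is wired up; the listen address and the trackConnState name are illustrative assumptions, not part of the indexer code.

package main

import (
    "log"
    "net"
    "net/http"
)

// trackConnState mirrors the role of (*httpServer).connState above: net/http
// calls it on every connection state transition (StateNew, StateActive, ...).
func trackConnState(conn net.Conn, state http.ConnState) {
    log.Printf("connState for %q: %v", conn.RemoteAddr(), state)
}

func main() {
    srv := &http.Server{
        Addr:      ":9102", // assumed port, for illustration only
        ConnState: trackConnState,
    }
    log.Fatal(srv.ListenAndServe())
}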
Example 14: receive
func receive(rch chan []interface{}) {
    // bucket -> Opcode -> #count
    counts := make(map[string]map[mcd.CommandCode]int)
    var tick <-chan time.Time
    if options.stats > 0 {
        tick = time.Tick(time.Millisecond * time.Duration(options.stats))
    }
    finTimeout := time.After(time.Millisecond * time.Duration(options.duration))
loop:
    for {
        select {
        case msg, ok := <-rch:
            if ok == false {
                break loop
            }
            bucket, e := msg[0].(string), msg[1].(*mc.DcpEvent)
            if e.Opcode == mcd.DCP_MUTATION {
                logging.Tracef("DcpMutation KEY -- %v\n", string(e.Key))
                logging.Tracef(" %v\n", string(e.Value))
            }
            if _, ok := counts[bucket]; !ok {
                counts[bucket] = make(map[mcd.CommandCode]int)
            }
            if _, ok := counts[bucket][e.Opcode]; !ok {
                counts[bucket][e.Opcode] = 0
            }
            counts[bucket][e.Opcode]++
        case <-tick:
            for bucket, m := range counts {
                logging.Infof("%q %s\n", bucket, sprintCounts(m))
            }
            logging.Infof("\n")
        case <-finTimeout:
            break loop
        }
    }
}
Example 15: sendKeyVersions
// send mutations for a set of vbuckets, update vbucket channels based on
// StreamBegin and StreamEnd.
func (c *Client) sendKeyVersions(
    vbs []*common.VbKeyVersions,
    vbChans map[string]chan interface{},
    quitch chan []string) []string {
    var idx int
    for _, vb := range vbs {
        if len(vb.Kvs) == 0 {
            logging.Warnf("%v empty mutations\n", c.logPrefix)
            continue
        }
        fin, l := false, len(vb.Kvs)
        if vb.Kvs[0].Commands[0] == common.StreamBegin { // first mutation
            vbChans[vb.Uuid], idx = c.addVbucket(vb.Uuid)
            logging.Tracef(
                "%v mapped vbucket {%v,%v}\n",
                c.logPrefixes[idx], vb.Bucket, vb.Vbucket)
        }
        if vb.Kvs[l-1].Commands[0] == common.StreamEnd { // last mutation
            fin = true
        }
        select {
        case vbChans[vb.Uuid] <- vb:
            if fin {
                logging.Tracef(
                    "%v {%v,%v} ended\n", c.logPrefix, vb.Bucket, vb.Vbucket)
                c.delVbucket(vb.Uuid)
                delete(vbChans, vb.Uuid)
            }
        case msg := <-quitch:
            return msg
        }
    }
    return nil
}