This article collects typical Golang usage examples of the Errorf function from github.com/couchbase/indexing/secondary/logging. If you are unsure what Errorf does, how to call it, or what real-world usage looks like, the curated examples below should help.
15 code examples of Errorf are shown below, ordered by popularity.
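Before the individual examples, here is a minimal, self-contained sketch of the call shape: Errorf takes a fmt.Printf-style format string followed by its arguments, which is the pattern every example below follows. The component name, bucket name, and error value here are placeholders.

package main

import (
    "errors"

    "github.com/couchbase/indexing/secondary/logging"
)

func main() {
    // Errorf formats like fmt.Printf and emits the message at error level.
    // The prefix and values are purely illustrative.
    err := errors.New("connection refused")
    logging.Errorf("Indexer::openBucket failed for bucket %v: %v", "default", err)
}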
Example 1: newTimer
//
// Create a timer that keeps track of the timestamp history across streams and buckets
//
func newTimer(repo *MetadataRepo) *Timer {

    timestamps := make(map[common.StreamId]timestampHistoryBucketMap)
    tickers := make(map[common.StreamId]tickerBucketMap)
    stopchs := make(map[common.StreamId]stopchBucketMap)
    outch := make(chan *timestampSerializable, TIMESTAMP_CHANNEL_SIZE)

    timer := &Timer{timestamps: timestamps,
        tickers: tickers,
        stopchs: stopchs,
        outch:   outch,
        ready:   false}

    savedTimestamps, err := repo.GetStabilityTimestamps()
    if err == nil {
        for _, timestamp := range savedTimestamps.Timestamps {
            ts, err := unmarshallTimestamp(timestamp.Timestamp)
            if err != nil {
                logging.Errorf("Timer.newTimer() : unable to unmarshall timestamp for bucket %v. Skip initialization.",
                    timestamp.Bucket)
                continue
            }
            timer.start(common.StreamId(timestamp.StreamId), timestamp.Bucket)
            for vb, seqno := range ts.Seqnos {
                timer.increment(common.StreamId(timestamp.StreamId), timestamp.Bucket, uint32(vb), ts.Vbuuids[vb], seqno)
            }
            logging.Errorf("Timer.newTimer() : initialized timestamp for bucket %v from repository.", timestamp.Bucket)
        }
    } else {
        // TODO : Determine timestamp not exist versus forestdb error
        logging.Errorf("Timer.newTimer() : cannot get stability timestamp from repository. Skip initialization.")
    }

    return timer
}
Example 2: startBucket
func startBucket(cluster, bucketn string, rch chan []interface{}) int {
    defer func() {
        if r := recover(); r != nil {
            logging.Errorf("Recovered from panic %v", r)
            logging.Errorf(logging.StackTrace())
        }
    }()

    logging.Infof("Connecting with %q\n", bucketn)
    b, err := common.ConnectBucket(cluster, "default", bucketn)
    mf(err, "bucket")

    dcpConfig := map[string]interface{}{
        "genChanSize":  10000,
        "dataChanSize": 10000,
    }
    dcpFeed, err := b.StartDcpFeed("rawupr", uint32(0), 0xABCD, dcpConfig)
    mf(err, "- upr")

    vbnos := listOfVbnos(options.maxVbno)
    flogs, err := b.GetFailoverLogs(0xABCD, vbnos, dcpConfig)
    mf(err, "- dcp failoverlogs")

    if options.printflogs {
        printFlogs(vbnos, flogs)
    }
    go startDcp(dcpFeed, flogs)

    for {
        e, ok := <-dcpFeed.C
        if ok == false {
            logging.Infof("Closing for bucket %q\n", bucketn)
        }
        rch <- []interface{}{bucketn, e}
    }
}
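For context, a hedged sketch of how a tool might drive startBucket; only the function signature comes from the example above, while the channel capacity, bucket names, and the consumer loop are illustrative assumptions.

// Hypothetical driver for startBucket; not taken from the original tool.
func runFeeds(cluster string, buckets []string) {
    rch := make(chan []interface{}, 10000) // capacity is an assumption
    for _, bucketn := range buckets {
        go startBucket(cluster, bucketn, rch)
    }
    for msg := range rch {
        bucketn, evt := msg[0].(string), msg[1]
        logging.Infof("bucket %q produced event %T\n", bucketn, evt)
    }
}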
Example 3: updateSnapInFilter
//updates snapshot information in bucket filter
func (r *mutationStreamReader) updateSnapInFilter(meta *MutationMeta,
    snapStart uint64, snapEnd uint64) {

    r.syncLock.Lock()
    defer r.syncLock.Unlock()

    if filter, ok := r.bucketFilterMap[meta.bucket]; ok {
        if snapEnd > filter.Snapshots[meta.vbucket][1] {

            //store the existing snap marker in prevSnap map
            prevSnap := r.bucketPrevSnapMap[meta.bucket]
            prevSnap.Snapshots[meta.vbucket][0] = filter.Snapshots[meta.vbucket][0]
            prevSnap.Snapshots[meta.vbucket][1] = filter.Snapshots[meta.vbucket][1]
            prevSnap.Vbuuids[meta.vbucket] = filter.Vbuuids[meta.vbucket]

            filter.Snapshots[meta.vbucket][0] = snapStart
            filter.Snapshots[meta.vbucket][1] = snapEnd
        } else {
            logging.Errorf("MutationStreamReader::updateSnapInFilter Skipped "+
                "Snapshot %v-%v for vb %v %v %v. Current Filter %v", snapStart,
                snapEnd, meta.vbucket, meta.bucket, r.streamId,
                filter.Snapshots[meta.vbucket][1])
        }
    } else {
        logging.Errorf("MutationStreamReader::updateSnapInFilter Missing "+
            "bucket %v in Filter for Stream %v", meta.bucket, r.streamId)
    }
}
Example 4: newMetaBridgeClient
func newMetaBridgeClient(
    cluster string, config common.Config) (c *metadataClient, err error) {

    b := &metadataClient{
        cluster:    cluster,
        finch:      make(chan bool),
        adminports: make(map[string]common.IndexerId),
        loads:      make(map[common.IndexDefnId]*loadHeuristics),
    }
    b.topology = make(map[common.IndexerId]map[common.IndexDefnId]*mclient.IndexMetadata)
    b.servicesNotifierRetryTm = config["servicesNotifierRetryTm"].Int()

    // initialize meta-data-provider.
    uuid, err := common.NewUUID()
    if err != nil {
        logging.Errorf("Could not generate UUID in common.NewUUID\n")
        return nil, err
    }
    b.mdClient, err = mclient.NewMetadataProvider(uuid.Str())
    if err != nil {
        return nil, err
    }

    if err := b.updateIndexerList(false); err != nil {
        logging.Errorf("updateIndexerList(): %v\n", err)
        b.mdClient.Close()
        return nil, err
    }

    b.Refresh()
    go b.watchClusterChanges() // will also update the indexer list
    return b, nil
}
Example 5: NewGSIIndexer
// NewGSIIndexer manages a new set of indexes under namespace->keyspace,
// also called pool->bucket.
// It will return an error when,
// - GSI cluster is not available.
// - network partitions / errors.
func NewGSIIndexer(
    clusterURL, namespace, keyspace string) (datastore.Indexer, errors.Error) {

    l.SetLogLevel(l.Info)

    gsi := &gsiKeyspace{
        clusterURL:     clusterURL,
        namespace:      namespace,
        keyspace:       keyspace,
        indexes:        make(map[uint64]*secondaryIndex), // defnID -> index
        primaryIndexes: make(map[uint64]*secondaryIndex),
    }
    gsi.logPrefix = fmt.Sprintf("GSIC[%s; %s]", namespace, keyspace)

    // get the singleton-client
    client, err := getSingletonClient(clusterURL)
    if err != nil {
        l.Errorf("%v GSI instantiation failed: %v", gsi.logPrefix, err)
        return nil, errors.NewError(err, "GSI client instantiation failed")
    }
    gsi.gsiClient = client

    // refresh indexes for this service->namespace->keyspace
    if err := gsi.Refresh(); err != nil {
        l.Errorf("%v Refresh() failed: %v", gsi.logPrefix, err)
        return nil, err
    }

    l.Debugf("%v instantiated ...", gsi.logPrefix)
    return gsi, nil
}
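A hedged usage sketch for the constructor above; only NewGSIIndexer's signature is taken from the example, and the cluster URL, namespace, and keyspace values are placeholders.

// Hypothetical caller; values are placeholders and error handling is minimal.
func openBeerSampleIndexer() (datastore.Indexer, errors.Error) {
    indexer, err := NewGSIIndexer("http://127.0.0.1:9000", "default", "beer-sample")
    if err != nil {
        // NewGSIIndexer has already logged the failure; just propagate it.
        return nil, err
    }
    return indexer, nil
}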
Example 6: insertPrimaryIndex
func (fdb *fdbSlice) insertPrimaryIndex(key []byte, docid []byte, workerId int) {
    var err error

    logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Set Key - %s", fdb.id, fdb.idxInstId, docid)

    //check if the docid exists in the main index
    t0 := time.Now()
    if _, err = fdb.main[workerId].GetKV(key); err == nil {
        fdb.idxStats.Timings.stKVGet.Put(time.Now().Sub(t0))
        //skip
        logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Key %v Already Exists. "+
            "Primary Index Update Skipped.", fdb.id, fdb.idxInstId, string(docid))
    } else if err != nil && err != forestdb.RESULT_KEY_NOT_FOUND {
        fdb.checkFatalDbError(err)
        logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error locating "+
            "mainindex entry %v", fdb.id, fdb.idxInstId, err)
    } else if err == forestdb.RESULT_KEY_NOT_FOUND {
        //set in main index
        t0 := time.Now()
        if err = fdb.main[workerId].SetKV(key, nil); err != nil {
            fdb.checkFatalDbError(err)
            logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error in Main Index Set. "+
                "Skipped Key %s. Error %v", fdb.id, fdb.idxInstId, string(docid), err)
        }
        fdb.idxStats.Timings.stKVSet.Put(time.Now().Sub(t0))
        platform.AddInt64(&fdb.insert_bytes, int64(len(key)))
    }
}
Example 7: Send
func Send(conn transporter, buf []byte, flags TransportFlag, payload []byte) (err error) {
    var n int

    // transport framing
    l := pktLenSize + pktFlagSize
    if maxLen := len(buf); l > maxLen {
        logging.Errorf("sending packet length %v > %v\n", l, maxLen)
        err = ErrorPacketOverflow
        return
    }

    a, b := pktLenOffset, pktLenOffset+pktLenSize
    binary.BigEndian.PutUint32(buf[a:b], uint32(len(payload)))
    a, b = pktFlagOffset, pktFlagOffset+pktFlagSize
    binary.BigEndian.PutUint16(buf[a:b], uint16(flags))
    if n, err = conn.Write(buf[:pktDataOffset]); err == nil {
        if n, err = conn.Write(payload); err == nil && n != len(payload) {
            logging.Errorf("transport wrote only %v bytes for payload\n", n)
            err = ErrorPacketWrite
        }
        laddr, raddr := conn.LocalAddr(), conn.RemoteAddr()
        logging.Tracef("wrote %v bytes on connection %v->%v", len(payload), laddr, raddr)
    } else if n != pktDataOffset {
        logging.Errorf("transport wrote only %v bytes for header\n", n)
        err = ErrorPacketWrite
    }
    return
}
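A hedged sketch of how Send might be called from within the same transport package; it assumes a net.Conn satisfies the transporter interface and that buf[:pktDataOffset] is exactly the length-plus-flag header that Send writes, as the framing code above implies.

// Illustrative only: assumes net.Conn implements transporter and that
// pktDataOffset equals the size of the fixed header built by Send.
func sendPayload(conn net.Conn, flags TransportFlag, payload []byte) error {
    buf := make([]byte, pktDataOffset) // scratch space for the header
    return Send(conn, buf, flags, payload)
}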
Example 8: dcpRequestStream
func (feed *DcpFeed) dcpRequestStream(
    vb uint16, opaque uint16, flags uint32,
    vbuuid, startSequence, endSequence, snapStart, snapEnd uint64) error {

    prefix := feed.logPrefix
    vbm := feed.bucket.VBServerMap()
    if l := len(vbm.VBucketMap); int(vb) >= l {
        fmsg := "%v ##%x invalid vbucket id %d >= %d\n"
        logging.Errorf(fmsg, prefix, opaque, vb, l)
        return ErrorInvalidVbucket
    }

    masterID := vbm.VBucketMap[vb][0]
    master := feed.bucket.getMasterNode(masterID)
    if master == "" {
        fmsg := "%v ##%x notFound master node for vbucket %d\n"
        logging.Errorf(fmsg, prefix, opaque, vb)
        return ErrorInvalidVbucket
    }

    singleFeed, ok := feed.nodeFeeds[master]
    if !ok {
        fmsg := "%v ##%x notFound DcpFeed host: %q vb:%d\n"
        logging.Errorf(fmsg, prefix, opaque, master, vb)
        return memcached.ErrorInvalidFeed
    }

    err := singleFeed.dcpFeed.DcpRequestStream(
        vb, opaque, flags, vbuuid, startSequence, endSequence,
        snapStart, snapEnd)
    if err != nil {
        return err
    }
    return nil
}
Example 9: dcpCloseStream
func (feed *DcpFeed) dcpCloseStream(vb, opaqueMSB uint16) error {
    prefix := feed.logPrefix
    vbm := feed.bucket.VBServerMap()
    if l := len(vbm.VBucketMap); int(vb) >= l {
        fmsg := "%v ##%x invalid vbucket id %d >= %d\n"
        logging.Errorf(fmsg, prefix, opaqueMSB, vb, l)
        return ErrorInvalidVbucket
    }

    masterID := vbm.VBucketMap[vb][0]
    master := feed.bucket.getMasterNode(masterID)
    if master == "" {
        fmsg := "%v ##%x notFound master node for vbucket %d\n"
        logging.Errorf(fmsg, prefix, opaqueMSB, vb)
        return ErrorInvalidVbucket
    }

    singleFeed, ok := feed.nodeFeeds[master]
    if !ok {
        fmsg := "%v ##%x notFound DcpFeed host: %q vb:%d"
        logging.Errorf(fmsg, prefix, opaqueMSB, master, vb)
        return memcached.ErrorInvalidFeed
    }

    if err := singleFeed.dcpFeed.CloseStream(vb, opaqueMSB); err != nil {
        return err
    }
    return nil
}
Example 10: Receive
func Receive(conn transporter, buf []byte) (flags TransportFlag, payload []byte, err error) {
    // transport de-framing
    if err = fullRead(conn, buf[:pktDataOffset]); err != nil {
        if err == io.EOF {
            logging.Tracef("receiving packet: %v\n", err)
        } else {
            logging.Errorf("receiving packet: %v\n", err)
        }
        return
    }
    a, b := pktLenOffset, pktLenOffset+pktLenSize
    pktlen := binary.BigEndian.Uint32(buf[a:b])
    a, b = pktFlagOffset, pktFlagOffset+pktFlagSize
    flags = TransportFlag(binary.BigEndian.Uint16(buf[a:b]))
    if maxLen := uint32(len(buf)); pktlen > maxLen {
        logging.Errorf("receiving packet length %v > %v\n", pktlen, maxLen)
        err = ErrorPacketOverflow
        return
    }
    if err = fullRead(conn, buf[:pktlen]); err != nil {
        if err == io.EOF {
            logging.Tracef("receiving packet: %v\n", err)
        } else {
            logging.Errorf("receiving packet: %v\n", err)
        }
        return
    }
    return flags, buf[:pktlen], err
}
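And the matching receive side, again hedged: it assumes the same transporter/net.Conn pairing as the Send sketch and that buf is sized for the largest packet the peer may send, since Receive returns ErrorPacketOverflow otherwise.

// Illustrative only: the 1 MiB buffer size is an assumed upper bound.
func receiveLoop(conn net.Conn, handle func(TransportFlag, []byte)) error {
    buf := make([]byte, 1024*1024)
    for {
        flags, payload, err := Receive(conn, buf)
        if err != nil {
            return err
        }
        handle(flags, payload)
    }
}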
Example 11: deleteIndexesFromStream
func (k *kvSender) deleteIndexesFromStream(streamId c.StreamId, indexInstList []c.IndexInst,
    respCh MsgChannel, stopCh StopChannel) {

    addrs, err := k.getAllProjectorAddrs()
    if err != nil {
        logging.Errorf("KVSender::deleteIndexesFromStream %v %v Error in fetching cluster info %v",
            streamId, indexInstList[0].Defn.Bucket, err)

        respCh <- &MsgError{
            err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
                severity: FATAL,
                cause:    err}}
        return
    }

    var uuids []uint64
    for _, indexInst := range indexInstList {
        uuids = append(uuids, uint64(indexInst.InstId))
    }

    topic := getTopicForStreamId(streamId)

    fn := func(r int, err error) error {
        //clear the error before every retry
        err = nil
        for _, addr := range addrs {
            execWithStopCh(func() {
                ap := newProjClient(addr)
                if ret := sendDelInstancesRequest(ap, topic, uuids); ret != nil {
                    logging.Errorf("KVSender::deleteIndexesFromStream %v %v Error Received %v from %v",
                        streamId, indexInstList[0].Defn.Bucket, ret, addr)
                    //Treat TopicMissing/GenServer.Closed/InvalidBucket as success
                    if ret.Error() == projClient.ErrorTopicMissing.Error() ||
                        ret.Error() == c.ErrorClosed.Error() ||
                        ret.Error() == projClient.ErrorInvalidBucket.Error() {
                        logging.Infof("KVSender::deleteIndexesFromStream %v %v Treating %v As Success",
                            streamId, indexInstList[0].Defn.Bucket, ret)
                    } else {
                        err = ret
                    }
                }
            }, stopCh)
        }
        return err
    }

    rh := c.NewRetryHelper(MAX_KV_REQUEST_RETRY, time.Second, BACKOFF_FACTOR, fn)
    err = rh.Run()

    if err != nil {
        logging.Errorf("KVSender::deleteIndexesFromStream %v %v Error Received %v",
            streamId, indexInstList[0].Defn.Bucket, err)
        respCh <- &MsgError{
            err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
                severity: FATAL,
                cause:    err}}
        return
    }

    respCh <- &MsgSuccess{}
}
Example 12: UpdateIndexInstance
func (m *LifecycleMgr) UpdateIndexInstance(bucket string, defnId common.IndexDefnId, state common.IndexState,
    streamId common.StreamId, errStr string, buildTime []uint64) error {

    topology, err := m.repo.GetTopologyByBucket(bucket)
    if err != nil {
        logging.Errorf("LifecycleMgr.handleTopologyChange() : index instance update fails. Reason = %v", err)
        return err
    }

    changed := false

    if state != common.INDEX_STATE_NIL {
        changed = topology.UpdateStateForIndexInstByDefn(common.IndexDefnId(defnId), common.IndexState(state)) || changed
    }

    if streamId != common.NIL_STREAM {
        changed = topology.UpdateStreamForIndexInstByDefn(common.IndexDefnId(defnId), common.StreamId(streamId)) || changed
    }

    changed = topology.SetErrorForIndexInstByDefn(common.IndexDefnId(defnId), errStr) || changed

    if changed {
        if err := m.repo.SetTopologyByBucket(bucket, topology); err != nil {
            logging.Errorf("LifecycleMgr.handleTopologyChange() : index instance update fails. Reason = %v", err)
            return err
        }
    }

    return nil
}
Example 13: Start
// Start is part of Server interface.
func (s *httpServer) Start() (err error) {
    s.mu.Lock()
    defer s.mu.Unlock()

    if s.lis != nil {
        logging.Errorf("%v already started ...\n", s.logPrefix)
        return ErrorServerStarted
    }

    if s.lis, err = net.Listen("tcp", s.srv.Addr); err != nil {
        logging.Errorf("%v listen failed %v\n", s.logPrefix, err)
        return err
    }

    // Server routine
    go func() {
        defer s.shutdown()

        logging.Infof("%s starting ...\n", s.logPrefix)
        err := s.srv.Serve(s.lis) // serve until listener is closed.
        // TODO: look into error message and skip logging if Stop().
        if err != nil {
            logging.Errorf("%s %v\n", s.logPrefix, err)
        }
    }()

    logging.PeriodicProfile(logging.Trace, s.srv.Addr, "goroutine")
    return
}
Example 14: Range
// Range scan index between low and high.
func (c *GsiScanClient) Range(
    defnID uint64, low, high common.SecondaryKey, inclusion Inclusion,
    distinct bool, limit int64, cons common.Consistency, vector *TsConsistency,
    callb ResponseHandler) (error, bool) {

    // serialize low and high values.
    l, err := json.Marshal(low)
    if err != nil {
        return err, false
    }
    h, err := json.Marshal(high)
    if err != nil {
        return err, false
    }

    connectn, err := c.pool.Get()
    if err != nil {
        return err, false
    }
    healthy := true
    defer func() { c.pool.Return(connectn, healthy) }()

    conn, pkt := connectn.conn, connectn.pkt

    req := &protobuf.ScanRequest{
        DefnID: proto.Uint64(defnID),
        Span: &protobuf.Span{
            Range: &protobuf.Range{
                Low: l, High: h, Inclusion: proto.Uint32(uint32(inclusion)),
            },
        },
        Distinct: proto.Bool(distinct),
        Limit:    proto.Int64(limit),
        Cons:     proto.Uint32(uint32(cons)),
    }
    if vector != nil {
        req.Vector = protobuf.NewTsConsistency(
            vector.Vbnos, vector.Seqnos, vector.Vbuuids, vector.Crc64)
    }
    // ---> protobuf.ScanRequest
    if err := c.sendRequest(conn, pkt, req); err != nil {
        fmsg := "%v Range() request transport failed `%v`\n"
        logging.Errorf(fmsg, c.logPrefix, err)
        healthy = false
        return err, false
    }

    cont, partial := true, false
    for cont {
        // <--- protobuf.ResponseStream
        cont, healthy, err = c.streamResponse(conn, pkt, callb)
        if err != nil { // if err, cont should have been set to false
            fmsg := "%v Range() response failed `%v`\n"
            logging.Errorf(fmsg, c.logPrefix, err)
        } else { // partial succeeded
            partial = true
        }
    }
    return err, partial
}
Example 15: processDelete
func (f *flusher) processDelete(mut *Mutation, docid []byte) {

    idxInst, _ := f.indexInstMap[mut.uuid]
    partnId := idxInst.Pc.GetPartitionIdByPartitionKey(mut.partnkey)

    var partnInstMap PartitionInstMap
    var ok bool
    if partnInstMap, ok = f.indexPartnMap[mut.uuid]; !ok {
        logging.Errorf("Flusher::processDelete Missing Partition Instance Map "+
            "for IndexInstId: %v. Skipped Mutation Key: %v", mut.uuid, mut.key)
        return
    }

    if partnInst, ok := partnInstMap[partnId]; ok {
        slice := partnInst.Sc.GetSliceByIndexKey(common.IndexKey(mut.key))
        if err := slice.Delete(docid); err != nil {
            logging.Errorf("Flusher::processDelete Error Deleting DocId: %v "+
                "from Slice: %v", docid, slice.Id())
        }
    } else {
        logging.Errorf("Flusher::processDelete Partition Instance not found "+
            "for Id: %v. Skipped Mutation Key: %v", partnId, mut.key)
    }
}