This page collects typical usage examples of the Golang Debugf function from github.com/couchbase/indexing/secondary/logging. If you are unsure what Debugf does or how to call it, the curated examples below should help.
The following shows 15 code examples of the Debugf function, sorted by popularity by default.
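Before the examples, here is a minimal standalone sketch of a Debugf call. It assumes only what the examples themselves demonstrate: Debugf takes a fmt.Printf-style format string followed by arguments. The message text and variable names are made up for illustration, and note that debug-level output is usually emitted only when the logger is configured at debug level (see the logging package for its level API).

package main

import (
	"github.com/couchbase/indexing/secondary/logging"
)

func main() {
	bucket, vb := "default", 512
	// fmt.Printf-style formatting, as used throughout the examples below.
	logging.Debugf("demo::main(): opened stream for bucket %v, vb %v", bucket, vb)
}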
Example 1: GetNodeListForBuckets
//
// Get the set of nodes for all the given buckets
//
func (p *ProjectorClientEnvImpl) GetNodeListForBuckets(buckets []string) (map[string]string, error) {

	logging.Debugf("ProjectorClientEnvImpl::GetNodeListForBuckets(): start")

	nodes := make(map[string]string)

	for _, bucket := range buckets {

		bucketRef, err := couchbase.GetBucket(COUCHBASE_INTERNAL_BUCKET_URL, DEFAULT_POOL_NAME, bucket)
		if err != nil {
			return nil, err
		}

		if err := bucketRef.Refresh(); err != nil {
			return nil, err
		}

		for _, node := range bucketRef.NodeAddresses() {
			// TODO: This may not work for cluster_run when all processes are run in the same node. Need to check.
			logging.Debugf("ProjectorClientEnvImpl::GetNodeListForBuckets(): node=%v for bucket %v", node, bucket)
			nodes[node] = node
		}
	}

	return nodes, nil
}
Example 2: validateActiveVb
func (p *ProjectorAdmin) validateActiveVb(buckets []string, activeTimestamps []*protobuf.TsVbuuid) bool {

	for _, bucket := range buckets {
		for vb := 0; vb < NUM_VB; vb++ {
			found := false
			for _, ts := range activeTimestamps {
				if ts.GetBucket() == bucket {
					for _, ts_vb := range ts.GetVbnos() {
						if uint32(vb) == ts_vb {
							if found {
								logging.Debugf("validateActiveVb(): find duplicate active timestamp for bucket %s vb %d", bucket, vb)
								return false
							}
							found = true
						}
					}
				}
			}
			if !found {
				logging.Debugf("validateActiveVb(): Cannot find active timestamp for bucket %s vb %d", bucket, vb)
				return false
			}
		}
	}

	return true
}
Example 3: GetClientForNode
//
// Get the projector client for the given node
//
func (p *ProjectorStreamClientFactoryImpl) GetClientForNode(server string) ProjectorStreamClient {

	var projAddr string

	if host, port, err := net.SplitHostPort(server); err == nil {
		if common.IsIPLocal(host) {

			if port == KV_DCP_PORT {
				projAddr = LOCALHOST + ":" + PROJECTOR_PORT
			} else {
				iportProj, _ := strconv.Atoi(PROJECTOR_PORT)
				iportKV, _ := strconv.Atoi(port)
				iportKV0, _ := strconv.Atoi(KV_DCP_PORT_CLUSTER_RUN)

				//In cluster_run, port number increments by 2
				nodeNum := (iportKV - iportKV0) / 2
				p := iportProj + nodeNum
				projAddr = LOCALHOST + ":" + strconv.Itoa(p)
			}
			logging.Debugf("StreamAdmin::GetClientForNode(): Local Projector Addr: %v", projAddr)

		} else {
			projAddr = host + ":" + PROJECTOR_PORT
			logging.Debugf("StreamAdmin::GetClientForNode(): Remote Projector Addr: %v", projAddr)
		}
	}

	//create client for node's projectors
	config := common.SystemConfig.SectionConfig("manager.projectorclient.", true)
	maxvbs := common.SystemConfig["maxVbuckets"].Int()
	ap := projectorC.NewClient(HTTP_PREFIX+projAddr+"/adminport/", maxvbs, config)
	return ap
}
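A quick worked example of the cluster_run arithmetic in Example 3. The port values below are stand-ins chosen for illustration (they are not taken from the indexing source); the point is only how the node offset is derived from the KV port, assuming KV ports step by 2 per cluster_run node.

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Hypothetical values, for illustration only.
	const (
		LOCALHOST               = "127.0.0.1"
		PROJECTOR_PORT          = "9999"  // assumed base projector port
		KV_DCP_PORT_CLUSTER_RUN = "12000" // assumed base KV port in cluster_run
	)
	kvPort := "12004" // assumed KV port of the third cluster_run node (offset 2 * 2)

	iportProj, _ := strconv.Atoi(PROJECTOR_PORT)
	iportKV, _ := strconv.Atoi(kvPort)
	iportKV0, _ := strconv.Atoi(KV_DCP_PORT_CLUSTER_RUN)

	// KV ports increment by 2 per node, so the node index is:
	nodeNum := (iportKV - iportKV0) / 2 // (12004 - 12000) / 2 = 2
	projAddr := LOCALHOST + ":" + strconv.Itoa(iportProj+nodeNum)

	fmt.Println(projAddr) // 127.0.0.1:10001 with these assumed ports
}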
Example 4: runProtocol
//
// run server (as leader or follower)
//
func (s *Coordinator) runProtocol(leader string) (err error) {

	host := s.getHostUDPAddr()

	// If this host is the leader, then start the leader server.
	// Otherwise, start the followerCoordinator.
	if leader == host {
		logging.Debugf("Coordinator.runServer() : Local Coordinator %s is elected as leader. Leading ...", leader)
		s.state.setStatus(protocol.LEADING)

		// start other master services if this node is a candidate as master
		s.idxMgr.startMasterService()
		defer s.idxMgr.stopMasterService()

		err = protocol.RunLeaderServer(s.getHostTCPAddr(), s.listener, s, s, s.factory, s.skillch)
	} else {
		logging.Debugf("Coordinator.runServer() : Remote Coordinator %s is elected as leader. Following ...", leader)
		s.state.setStatus(protocol.FOLLOWING)

		leaderAddr := s.findMatchingPeerTCPAddr(leader)
		if len(leaderAddr) == 0 {
			return NewError(ERROR_COOR_ELECTION_FAIL, NORMAL, COORDINATOR, nil,
				fmt.Sprintf("Index Coordinator cannot find matching TCP addr for leader "+leader))
		}
		err = protocol.RunFollowerServer(s.getHostTCPAddr(), leaderAddr, s, s, s.factory, s.skillch)
	}

	return err
}
Example 5: calcQueueLenFromMemQuota
//Calculate mutation queue length from memory quota
func (m *mutationMgr) calcQueueLenFromMemQuota() uint64 {

	memQuota := m.config["settings.memory_quota"].Uint64()
	maxVbLen := m.config["settings.maxVbQueueLength"].Uint64()
	maxVbLenDef := m.config["settings.maxVbQueueLength"].DefaultVal.(uint64)

	//if there is a user specified value, use that
	if maxVbLen != 0 {
		logging.Debugf("MutationMgr:: Set maxVbQueueLength %v", maxVbLen)
		return maxVbLen
	} else {
		//Formula for calculation (see MB-14876)
		//Below 2GB - 5000 per vbucket
		//2GB to 4GB - 8000 per vbucket
		//Above 4GB - 10000 per vbucket
		if memQuota <= 2*1024*1024*1024 {
			maxVbLen = 5000
		} else if memQuota <= 4*1024*1024*1024 {
			maxVbLen = 8000
		} else {
			maxVbLen = maxVbLenDef
		}
		logging.Debugf("MutationMgr:: Set maxVbQueueLength %v", maxVbLen)
		return maxVbLen
	}
}
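The sizing rule referenced by MB-14876 in Example 5 can be restated as a small standalone helper. This is only a sketch of the same tiering; the helper name is invented, and the 10000 figure for the top tier is taken from the code comment (in the real code it comes from the maxVbQueueLength default value).

package main

import "fmt"

// queueLenForQuota mirrors the tiering in Example 5:
// <= 2GB -> 5000 per vbucket, <= 4GB -> 8000, above that -> the configured default.
func queueLenForQuota(memQuota, defaultLen uint64) uint64 {
	const GB = 1024 * 1024 * 1024
	switch {
	case memQuota <= 2*GB:
		return 5000
	case memQuota <= 4*GB:
		return 8000
	default:
		return defaultLen
	}
}

func main() {
	for _, quota := range []uint64{1 << 30, 3 << 30, 8 << 30} {
		fmt.Printf("quota=%d bytes -> maxVbQueueLength=%d\n", quota, queueLenForQuota(quota, 10000))
	}
}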
Example 6: handleDeleteInstances
//
// This function computes topology changes.
//
func (s *StreamManager) handleDeleteInstances(
	streamId common.StreamId,
	bucket string,
	oldTopology *IndexTopology,
	newTopology *IndexTopology,
	fromState []common.IndexState,
	toState []common.IndexState) error {

	if oldTopology == nil || oldTopology.Version == newTopology.Version {
		return nil
	}

	var changes []*changeRecord = nil
	for _, newDefn := range newTopology.Definitions {
		if oldTopology != nil {
			oldDefn := oldTopology.FindIndexDefinition(newDefn.Bucket, newDefn.Name)
			changes = append(changes, s.addInstancesToChangeList(oldDefn, &newDefn, fromState, toState)...)
		} else {
			changes = append(changes, s.addInstancesToChangeList(nil, &newDefn, fromState, toState)...)
		}
	}

	var toBeDeleted []uint64 = nil
	for _, change := range changes {
		logging.Debugf("StreamManager.handleDeleteInstances(): adding inst '%v' to change list.", change.instance.InstId)
		toBeDeleted = append(toBeDeleted, change.instance.InstId)
	}

	logging.Debugf("StreamManager.handleDeleteInstances(): len(toBeDeleted) '%v'", len(toBeDeleted))

	return s.removeIndexInstances(streamId, bucket, toBeDeleted)
}
Example 7: run
func (s *Stream) run() {

	logging.Debugf("Stream.run(): starts")

	defer s.receiver.Close()

	for {
		select {
		case mut := <-s.mutch:
			func() {
				defer func() {
					if r := recover(); r != nil {
						logging.Debugf("panic in Stream.run() : error ignored. Error = %v\n", r)
					}
				}()

				switch d := mut.(type) {
				case ([]*data.VbKeyVersions):
					logging.Debugf("Stream.run(): receive VbKeyVersion")
					s.handleVbKeyVersions(d)
				case dataport.ConnectionError:
					logging.Debugf("Stream.run(): receive ConnectionError")
					s.handler.HandleConnectionError(s.id, d)
				}
			}()

		case <-s.stopch:
			logging.Debugf("Stream.run(): stop")
			return
		}
	}
}
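The per-message closure with a deferred recover() in Example 7 is a general Go pattern for keeping a long-running loop alive when a single message handler panics. The sketch below isolates just that pattern with stand-in channels and a toy handler; none of the indexing types are involved, and every name here is invented for illustration.

package main

import "fmt"

// process keeps receiving messages until stop is signalled. Each message is
// handled inside its own closure so a panic only aborts that message, not the loop.
func process(msgs <-chan interface{}, stop <-chan struct{}) {
	for {
		select {
		case m := <-msgs:
			func() {
				defer func() {
					if r := recover(); r != nil {
						fmt.Printf("panic ignored: %v\n", r)
					}
				}()
				handle(m)
			}()
		case <-stop:
			return
		}
	}
}

func handle(m interface{}) {
	if m == nil {
		panic("nil message")
	}
	fmt.Println("handled:", m)
}

func main() {
	msgs := make(chan interface{})
	stop := make(chan struct{})
	done := make(chan struct{})
	go func() { process(msgs, stop); close(done) }()

	msgs <- "ok"
	msgs <- nil // panics inside handle, recovered, loop keeps running
	close(stop)
	<-done
}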
Example 8: LogProposal
// TODO : what to do if createIndex returns error
func (c *Coordinator) LogProposal(proposal protocol.ProposalMsg) error {

	if c.GetStatus() == protocol.LEADING {
		switch common.OpCode(proposal.GetOpCode()) {
		case OPCODE_ADD_IDX_DEFN:
			success := c.createIndex(proposal.GetKey(), proposal.GetContent())
			logging.Debugf("Coordinator.LogProposal(): (createIndex) success = %v", success)
		case OPCODE_DEL_IDX_DEFN:
			success := c.deleteIndex(proposal.GetKey())
			logging.Debugf("Coordinator.LogProposal(): (deleteIndex) success = %v", success)
		}
	}

	switch common.OpCode(proposal.GetOpCode()) {
	case OPCODE_NOTIFY_TIMESTAMP:
		timestamp, err := unmarshallTimestampSerializable(proposal.GetContent())
		if err == nil {
			c.idxMgr.notifyNewTimestamp(timestamp)
		} else {
			logging.Debugf("Coordinator.LogProposal(): error when unmarshalling timestamp. Ignore timestamp. Error=%s", err.Error())
		}
	}

	c.updateRequestOnNewProposal(proposal)
	return nil
}
Example 9: StartStream
//
// Start a stream for listening only. This will not trigger the mutation source to start
// streaming mutations. Need to call AddIndexForBucket() or AddIndexForAllBuckets()
// to kick off the mutation source to start streaming the mutations for indexes in bucket(s).
//
func (s *StreamManager) StartStream(streamId common.StreamId) error {

	s.mutex.Lock()
	defer s.mutex.Unlock()

	logging.Debugf("StreamManager.StartStream(): start")

	if s.isClosed {
		return nil
	}

	// Verify if the stream is already open. If so, this is just a no-op.
	if stream, ok := s.streams[streamId]; ok && stream.status {
		logging.Debugf("StreamManager.StartStream(): stream %v already started", streamId)
		return nil
	}

	// Create a new stream. This will prepare the receiver to be ready for receiving mutations.
	port := getPortForStreamId(streamId)
	stream, err := newStream(streamId, port, s.handler)
	if err != nil {
		return err
	}

	err = stream.start()
	if err != nil {
		return err
	}

	logging.Debugf("StreamManager.StartStream(): stream %v started successfully on port %v", streamId, port)

	s.streams[streamId] = stream
	stream.status = true
	return nil
}
Example 10: runElection
//
// run election
//
func (s *Coordinator) runElection() (leader string, err error) {

	host := s.getHostUDPAddr()
	peers := s.getPeerUDPAddr()

	// Create an election site to start leader election.
	logging.Debugf("Coordinator.runElection(): Local Coordinator %s start election", host)
	logging.Debugf("Coordinator.runElection(): Peer in election")
	for _, peer := range peers {
		logging.Debugf(" peer : %s", peer)
	}

	s.site, err = protocol.CreateElectionSite(host, peers, s.factory, s, false)
	if err != nil {
		return "", err
	}

	// blocked until leader is elected. coordinator.Terminate() will unblock this.
	resultCh := s.site.StartElection()
	leader, ok := <-resultCh
	if !ok {
		return "", NewError(ERROR_COOR_ELECTION_FAIL, NORMAL, COORDINATOR, nil,
			fmt.Sprintf("Index Coordinator Election Fails"))
	}

	return leader, nil
}
Example 11: handleRestoreIndexMetadataRequest
//
// Restore semantics:
// 1) Each index is associated with the <IndexDefnId, IndexerId>. IndexDefnId is unique for each index definition,
//    and IndexerId is unique among the index nodes. Note that IndexDefnId cannot be reused.
// 2) Index defn exists for the given <IndexDefnId, IndexerId> in current repository. No action will be applied during restore.
// 3) Index defn is deleted or missing in current repository. Index Defn restored from backup if bucket exists.
//    - Index defn of the same <bucket, name> exists. It will rename the index to <index name>_restore_<seqNo>
//    - Bucket does not exist. It will restore an index defn with a non-existent bucket.
//
func (m *requestHandlerContext) handleRestoreIndexMetadataRequest(w http.ResponseWriter, r *http.Request) {

	if !doAuth(r, w, m.clusterUrl) {
		return
	}

	image := m.convertIndexMetadataRequest(r)
	if image == nil {
		send(w, &RestoreResponse{Code: RESP_ERROR, Error: "Unable to process request input"})
		return
	}

	indexerHostMap := make(map[common.IndexerId]string)
	current, err := m.getIndexMetadata(m.mgr.getServiceAddrProvider().(*common.ClusterInfoCache), indexerHostMap)
	if err != nil {
		send(w, &RestoreResponse{Code: RESP_ERROR, Error: "Unable to get the latest index metadata for restore"})
		return
	}

	context := &RestoreContext{idxToRestore: make(map[common.IndexerId][]common.IndexDefn),
		idxToResolve: make(map[common.IndexerId][]common.IndexDefn)}

	// Figure out which indexes to restore that have the same IndexDefnId
	for _, imeta := range image.Metadata {
		found := false
		for _, cmeta := range current.Metadata {
			if imeta.IndexerId == cmeta.IndexerId {
				m.findIndexToRestoreById(&imeta, &cmeta, context)
				found = true
			}
		}

		if !found {
			logging.Debugf("requestHandler.handleRestoreIndexMetadataRequest(): cannot find matching indexer id %s", imeta.IndexerId)
			for _, idefn := range imeta.IndexDefinitions {
				logging.Debugf("requestHandler.handleRestoreIndexMetadataRequest(): adding index definition (%v,%v) to to-be-resolved list", idefn.Bucket, idefn.Name)
				context.idxToResolve[common.IndexerId(imeta.IndexerId)] =
					append(context.idxToResolve[common.IndexerId(imeta.IndexerId)], idefn)
			}
		}
	}

	// Figure out which indexes to restore that have the same bucket and name
	for indexerId, idxs := range context.idxToResolve {
		for _, idx := range idxs {
			m.findIndexToRestoreByName(current, idx, indexerId, context)
		}
	}

	// recreate index
	success := m.restoreIndex(current, context, indexerHostMap)

	if success {
		send(w, &RestoreResponse{Code: RESP_SUCCESS})
		return
	}

	send(w, &RestoreResponse{Code: RESP_ERROR, Error: "Unable to restore metadata"})
}
Example 12: handleDeleteBucket
func (m *LifecycleMgr) handleDeleteBucket(bucket string, content []byte) error {

	result := error(nil)

	if len(content) == 0 {
		return errors.New("invalid argument")
	}

	streamId := common.StreamId(content[0])

	topology, err := m.repo.GetTopologyByBucket(bucket)
	if err == nil {
		/*
			// if there is an error getting the UUID, this means that
			// the node is not able to connect to pool service in order
			// to fetch the bucket UUID. Return an error and skip.
			uuid, err := m.getBucketUUID(bucket)
			if err != nil {
				logging.Errorf("LifecycleMgr.handleDeleteBucket() : Encounter when connecting to pool service = %v", err)
				return err
			}
		*/

		// At this point, we are able to connect to pool service. If pool
		// does not contain the bucket, then we delete all index defn in
		// the bucket. Otherwise, delete index defn that does not have the
		// same bucket UUID. Note that any other create index request will
		// be blocked while this call is run.
		definitions := make([]IndexDefnDistribution, len(topology.Definitions))
		copy(definitions, topology.Definitions)

		for _, defnRef := range definitions {

			if defn, err := m.repo.GetIndexDefnById(common.IndexDefnId(defnRef.DefnId)); err == nil {

				logging.Debugf("LifecycleMgr.handleDeleteBucket() : index instance: id %v, streamId %v.",
					defn.DefnId, defnRef.Instances[0].StreamId)

				// delete index defn from the bucket if bucket uuid is not specified or
				// index does *not* belong to bucket uuid
				if /* (uuid == common.BUCKET_UUID_NIL || defn.BucketUUID != uuid) && */
				streamId == common.NIL_STREAM || common.StreamId(defnRef.Instances[0].StreamId) == streamId {
					if err := m.DeleteIndex(common.IndexDefnId(defn.DefnId), false); err != nil {
						result = err
					}
				}
			} else {
				logging.Debugf("LifecycleMgr.handleDeleteBucket() : Cannot find index instance %v. Skip.", defnRef.DefnId)
			}
		}
	} else if err != fdb.RESULT_KEY_NOT_FOUND {
		result = err
	}

	return result
}
Example 13: runTimestampKeeper
func (m *IndexManager) runTimestampKeeper() {

	defer logging.Debugf("IndexManager.runTimestampKeeper() : terminate")

	inboundch := m.timer.getOutputChannel()

	persistTimestamp := true // save the first timestamp always
	lastPersistTime := uint64(time.Now().UnixNano())

	timestamps, err := m.repo.GetStabilityTimestamps()
	if err != nil {
		// TODO : Determine timestamp not exist versus forestdb error
		logging.Errorf("IndexManager.runTimestampKeeper() : cannot get stability timestamp from repository. Create a new one.")
		timestamps = createTimestampListSerializable()
	}

	for {
		select {
		case <-m.timekeeperStopCh:
			return

		case timestamp, ok := <-inboundch:

			if !ok {
				return
			}

			gometaC.SafeRun("IndexManager.runTimestampKeeper()",
				func() {
					timestamps.addTimestamp(timestamp)
					persistTimestamp = persistTimestamp ||
						uint64(time.Now().UnixNano())-lastPersistTime > m.timestampPersistInterval
					if persistTimestamp {
						if err := m.repo.SetStabilityTimestamps(timestamps); err != nil {
							logging.Errorf("IndexManager.runTimestampKeeper() : cannot set stability timestamp into repository.")
						} else {
							logging.Debugf("IndexManager.runTimestampKeeper() : saved stability timestamp to repository")
							persistTimestamp = false
							lastPersistTime = uint64(time.Now().UnixNano())
						}
					}

					data, err := marshallTimestampSerializable(timestamp)
					if err != nil {
						logging.Debugf(
							"IndexManager.runTimestampKeeper(): error when marshalling timestamp. Ignore timestamp. Error=%s",
							err.Error())
					} else {
						m.coordinator.NewRequest(uint32(OPCODE_NOTIFY_TIMESTAMP), "Stability Timestamp", data)
					}
				})
		}
	}
}
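Example 13 also shows a small throttling idea worth isolating: persist the first timestamp unconditionally, then persist again only after a configured interval has elapsed since the last successful save, and keep the persist flag set so a failed save is retried on the next message. The sketch below reproduces that logic with plain ints and a stand-in save function; all names here are invented for illustration.

package main

import (
	"fmt"
	"time"
)

// persistThrottled saves the first value always, then saves again only when
// persistInterval has elapsed since the last successful save.
func persistThrottled(values []int, persistInterval time.Duration, save func(int) error) {
	persist := true // save the first value always
	lastPersist := time.Now()

	for _, v := range values {
		persist = persist || time.Since(lastPersist) > persistInterval
		if persist {
			if err := save(v); err != nil {
				// leave persist set so the next value is attempted
				fmt.Println("save failed, will retry on next value:", err)
				continue
			}
			persist = false
			lastPersist = time.Now()
		}
	}
}

func main() {
	saved := 0
	persistThrottled([]int{1, 2, 3, 4, 5}, time.Hour, func(v int) error {
		saved++
		fmt.Println("persisted", v)
		return nil
	})
	fmt.Println("total persisted:", saved) // only the first value with a 1h interval
}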
Example 14: restartStream
//
// Restart vbucket streams on a specific projector node
//
func (worker *adminWorker) restartStream(timestamps []*protobuf.TsVbuuid, doneCh chan *adminWorker) {

	defer func() {
		doneCh <- worker
	}()

	logging.Debugf("adminWorker::restartStream(): start")

	// Get projector client for the particular node. This function does not
	// return an error even if the server is an invalid host name, but subsequent
	// call to client may fail. Also note that there is no method to close the client
	// (no need to close upon termination).
	client := worker.admin.factory.GetClientForNode(worker.server)
	if client == nil {
		logging.Debugf("adminWorker::restartStream(): no client returns from factory")
		return
	}

	// open the stream for the specific node for the set of <bucket, timestamp>
	topic := getTopicForStreamId(worker.streamId)
	retry := true
	startTime := time.Now().Unix()
	for retry {
		select {
		case <-worker.killch:
			return
		default:
			response, err := client.RestartVbuckets(topic, timestamps)
			if err == nil {
				// no error, it is successful for this node
				worker.activeTimestamps = response.GetActiveTimestamps()
				worker.err = nil
				return
			}

			timestamps, err = worker.shouldRetryRestartVbuckets(timestamps, response, err)
			if err != nil {
				// Either it is a non-recoverable error or an error that cannot be retried by this worker.
				// Terminate this worker.
				worker.activeTimestamps = response.GetActiveTimestamps()
				worker.err = err
				return
			}

			retry = time.Now().Unix()-startTime < MAX_PROJECTOR_RETRY_ELAPSED_TIME
		}
	}

	// When we reach here, the elapsed time within which the projector is supposed to respond has passed.
	// The projector may have died or there may be a network partition, so return an error since it may
	// require another worker to retry.
	worker.err = NewError4(ERROR_STREAM_PROJECTOR_TIMEOUT, NORMAL, STREAM, "Projector Call timeout after retry.")
}
Example 15: repairEndpoint
//
// Repair endpoint for a specific projector node
//
func (worker *adminWorker) repairEndpoint(endpoint string, doneCh chan *adminWorker) {

	defer func() {
		doneCh <- worker
	}()

	logging.Debugf("adminWorker::repairEndpoint(): start")

	// Get projector client for the particular node. This function does not
	// return an error even if the server is an invalid host name, but subsequent
	// call to client may fail. Also note that there is no method to close the client
	// (no need to close upon termination).
	client := worker.admin.factory.GetClientForNode(worker.server)
	if client == nil {
		logging.Debugf("adminWorker::repairEndpoint(): no client returns from factory")
		return
	}

	// repair the endpoint for this stream's topic on the specific node
	topic := getTopicForStreamId(worker.streamId)
	retry := true
	startTime := time.Now().Unix()
	for retry {
		select {
		case <-worker.killch:
			return
		default:
			err := client.RepairEndpoints(topic, []string{endpoint})
			if err == nil {
				// no error, it is successful for this node
				worker.err = nil
				return
			}
			logging.Debugf("adminWorker::repairEndpoint(): Error encountered when calling RepairEndpoint. Error=%v", err.Error())

			if strings.Contains(err.Error(), projectorC.ErrorTopicMissing.Error()) {
				// It is OK if topic is missing
				worker.err = nil
				return
			}

			retry = time.Now().Unix()-startTime < MAX_PROJECTOR_RETRY_ELAPSED_TIME
		}
	}

	// When we reach here, the elapsed time within which the projector is supposed to respond has passed.
	// The projector may have died or there may be a network partition, so return an error since it may
	// require another worker to retry.
	worker.err = NewError4(ERROR_STREAM_PROJECTOR_TIMEOUT, NORMAL, STREAM, "Projector Call timeout after retry.")
}
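Examples 14 and 15 share the same retry shape: loop until the call succeeds, a kill channel fires, or an elapsed-time budget runs out. The sketch below isolates that loop with a stand-in call function; the names, the backoff sleep, and the error messages are invented for illustration (the original loops do not sleep between attempts).

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryUntil keeps calling call() until it succeeds, killch is signalled,
// or the elapsed-time budget is exhausted - the loop shape of Examples 14 and 15.
func retryUntil(maxElapsed time.Duration, killch <-chan struct{}, call func() error) error {
	start := time.Now()
	for {
		select {
		case <-killch:
			return errors.New("cancelled")
		default:
			if err := call(); err == nil {
				return nil
			}
			if time.Since(start) >= maxElapsed {
				return errors.New("timed out after retries")
			}
			time.Sleep(100 * time.Millisecond) // small backoff between attempts
		}
	}
}

func main() {
	attempts := 0
	err := retryUntil(2*time.Second, make(chan struct{}), func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}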