This article collects typical usage examples of the Go function common.CrashOnError from github.com/couchbase/indexing/secondary/common. If you are wondering what CrashOnError does, how to call it, or what real-world usage looks like, the curated examples here may help.
A total of 15 code examples of the CrashOnError function are shown below, sorted by popularity by default.
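For reference, CrashOnError is a fail-fast guard: each example below hands it the error from an operation the indexer cannot recover from, and the helper aborts the process when that error is non-nil rather than letting execution continue in an inconsistent state. The sketch below illustrates the idea, assuming the helper simply panics on a non-nil error; it is a minimal illustration inferred from the call sites in this article, not the verbatim implementation from the Couchbase repository.

// A minimal sketch of a fail-fast helper, assuming it panics on a non-nil error.
// The real common.CrashOnError may differ in detail (e.g. it may log before crashing).
func CrashOnError(err error) {
    if err != nil {
        panic(err)
    }
}

A typical call site follows the pattern seen throughout the examples: log the failure via Fatalf/Errorf first, then pass the error to common.CrashOnError so the process stops immediately.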
Example 1: ValidateBucket
func ValidateBucket(cluster, bucket string, uuids []string) bool {

    var cinfo *common.ClusterInfoCache
    url, err := common.ClusterAuthUrl(cluster)
    if err == nil {
        cinfo, err = common.NewClusterInfoCache(url, DEFAULT_POOL)
    }
    if err != nil {
        logging.Fatalf("Indexer::Fail to init ClusterInfoCache : %v", err)
        common.CrashOnError(err)
    }

    cinfo.Lock()
    defer cinfo.Unlock()

    if err := cinfo.Fetch(); err != nil {
        logging.Errorf("Indexer::Fail to init ClusterInfoCache : %v", err)
        common.CrashOnError(err)
    }

    if nids, err := cinfo.GetNodesByBucket(bucket); err == nil && len(nids) != 0 {
        // verify UUID
        currentUUID := cinfo.GetBucketUUID(bucket)
        for _, uuid := range uuids {
            if uuid != currentUUID {
                return false
            }
        }
        return true
    } else {
        logging.Fatalf("Indexer::Error Fetching Bucket Info: %v Nids: %v", err, nids)
        return false
    }
}
Example 2: deletePrimaryIndex
func (fdb *fdbSlice) deletePrimaryIndex(docid []byte, workerId int) {

    //logging.Tracef("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Delete Key - %s",
    //	fdb.id, fdb.idxInstId, docid)

    if docid == nil {
        common.CrashOnError(errors.New("Nil Primary Key"))
        return
    }

    //docid -> key format
    entry, err := NewPrimaryIndexEntry(docid)
    common.CrashOnError(err)

    //delete from main index
    t0 := time.Now()
    if err := fdb.main[workerId].DeleteKV(entry.Bytes()); err != nil {
        fdb.checkFatalDbError(err)
        logging.Errorf("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Error deleting "+
            "entry from main index for Doc %s. Error %v", fdb.id, fdb.idxInstId,
            docid, err)
        return
    }
    fdb.idxStats.Timings.stKVDelete.Put(time.Now().Sub(t0))
    platform.AddInt64(&fdb.delete_bytes, int64(len(entry.Bytes())))
}
Example 3: handleGetGlobalTopology
func (c *clustMgrAgent) handleGetGlobalTopology(cmd Message) {

    logging.Debugf("ClustMgr:handleGetGlobalTopology %v", cmd)

    //get the latest topology from manager
    metaIter, err := c.mgr.NewIndexDefnIterator()
    if err != nil {
        common.CrashOnError(err)
    }
    defer metaIter.Close()

    indexInstMap := make(common.IndexInstMap)

    for _, defn, err := metaIter.Next(); err == nil; _, defn, err = metaIter.Next() {

        var idxDefn common.IndexDefn
        idxDefn = *defn

        t, e := c.mgr.GetTopologyByBucket(idxDefn.Bucket)
        if e != nil {
            common.CrashOnError(e)
        }

        inst := t.GetIndexInstByDefn(idxDefn.DefnId)
        if inst == nil {
            logging.Warnf("ClustMgr:handleGetGlobalTopology Index Instance Not "+
                "Found For Index Definition %v. Ignored.", idxDefn)
            continue
        }

        //for indexer, Ready state doesn't matter. Till index build,
        //the index stays in Created state.
        var state common.IndexState
        instState := common.IndexState(inst.State)
        if instState == common.INDEX_STATE_READY {
            state = common.INDEX_STATE_CREATED
        } else {
            state = instState
        }

        idxInst := common.IndexInst{InstId: common.IndexInstId(inst.InstId),
            Defn:   idxDefn,
            State:  state,
            Stream: common.StreamId(inst.StreamId),
        }

        indexInstMap[idxInst.InstId] = idxInst
    }

    c.supvCmdch <- &MsgClustMgrTopology{indexInstMap: indexInstMap}
}
Example 4: OnIndexBuild
func (meta *metaNotifier) OnIndexBuild(indexDefnList []common.IndexDefnId, buckets []string) map[common.IndexInstId]error {

    logging.Infof("clustMgrAgent::OnIndexBuild Notification "+
        "Received for Build Index %v", indexDefnList)

    respCh := make(MsgChannel)

    var indexInstList []common.IndexInstId
    for _, defnId := range indexDefnList {
        indexInstList = append(indexInstList, common.IndexInstId(defnId))
    }

    meta.adminCh <- &MsgBuildIndex{indexInstList: indexInstList,
        respCh:     respCh,
        bucketList: buckets}

    //wait for response
    if res, ok := <-respCh; ok {
        switch res.GetMsgType() {

        case CLUST_MGR_BUILD_INDEX_DDL_RESPONSE:
            errMap := res.(*MsgBuildIndexResponse).GetErrorMap()
            logging.Infof("clustMgrAgent::OnIndexBuild returns "+
                "for Build Index %v", indexDefnList)
            return errMap

        case MSG_ERROR:
            logging.Errorf("clustMgrAgent::OnIndexBuild Error "+
                "for Build Index %v. Error %v.", indexDefnList, res)
            err := res.(*MsgError).GetError()
            errMap := make(map[common.IndexInstId]error)
            for _, instId := range indexDefnList {
                errMap[common.IndexInstId(instId)] = errors.New(err.String())
            }
            return errMap

        default:
            logging.Fatalf("clustMgrAgent::OnIndexBuild Unknown Response "+
                "Received for Build Index %v. Response %v", indexDefnList, res)
            common.CrashOnError(errors.New("Unknown Response"))
        }
    } else {
        logging.Fatalf("clustMgrAgent::OnIndexBuild Unexpected Channel Close "+
            "for Create Index %v", indexDefnList)
        common.CrashOnError(errors.New("Unknown Response"))
    }

    return nil
}
Example 5: OnIndexCreate
func (meta *metaNotifier) OnIndexCreate(indexDefn *common.IndexDefn) error {

    logging.Infof("clustMgrAgent::OnIndexCreate Notification "+
        "Received for Create Index %v", indexDefn)

    pc := meta.makeDefaultPartitionContainer()

    idxInst := common.IndexInst{InstId: common.IndexInstId(indexDefn.DefnId),
        Defn:  *indexDefn,
        State: common.INDEX_STATE_CREATED,
        Pc:    pc,
    }

    respCh := make(MsgChannel)

    meta.adminCh <- &MsgCreateIndex{mType: CLUST_MGR_CREATE_INDEX_DDL,
        indexInst: idxInst,
        respCh:    respCh}

    //wait for response
    if res, ok := <-respCh; ok {
        switch res.GetMsgType() {

        case MSG_SUCCESS:
            logging.Infof("clustMgrAgent::OnIndexCreate Success "+
                "for Create Index %v", indexDefn)
            return nil

        case MSG_ERROR:
            logging.Errorf("clustMgrAgent::OnIndexCreate Error "+
                "for Create Index %v. Error %v.", indexDefn, res)
            err := res.(*MsgError).GetError()
            return err.cause

        default:
            logging.Fatalf("clustMgrAgent::OnIndexCreate Unknown Response "+
                "Received for Create Index %v. Response %v", indexDefn, res)
            common.CrashOnError(errors.New("Unknown Response"))
        }
    } else {
        logging.Fatalf("clustMgrAgent::OnIndexCreate Unexpected Channel Close "+
            "for Create Index %v", indexDefn)
        common.CrashOnError(errors.New("Unknown Response"))
    }

    return nil
}
Example 6: siSplitEntry
func siSplitEntry(entry []byte, tmp []byte) ([]byte, []byte) {
    e := secondaryIndexEntry(entry)
    sk, err := e.ReadSecKey(tmp)
    c.CrashOnError(err)
    docid, err := e.ReadDocId(sk)
    return sk, docid[len(sk):]
}
Example 7: handleUpdateTopologyForIndex
func (c *clustMgrAgent) handleUpdateTopologyForIndex(cmd Message) {

    logging.Debugf("ClustMgr:handleUpdateTopologyForIndex %v", cmd)

    indexList := cmd.(*MsgClustMgrUpdate).GetIndexList()
    updatedFields := cmd.(*MsgClustMgrUpdate).GetUpdatedFields()

    updatedState := common.INDEX_STATE_NIL
    updatedStream := common.NIL_STREAM
    updatedError := ""

    for _, index := range indexList {
        if updatedFields.state {
            updatedState = index.State
        }
        if updatedFields.stream {
            updatedStream = index.Stream
        }
        if updatedFields.err {
            updatedError = index.Error
        }

        updatedBuildTs := index.BuildTs

        err := c.mgr.UpdateIndexInstance(index.Defn.Bucket, index.Defn.DefnId,
            updatedState, updatedStream, updatedError, updatedBuildTs)
        common.CrashOnError(err)
    }

    c.supvCmdch <- &MsgSuccess{}
}
Example 8: addPartnInfoToProtoInst
func addPartnInfoToProtoInst(cfg c.Config, cinfo *c.ClusterInfoCache,
    indexInst c.IndexInst, streamId c.StreamId, protoInst *protobuf.IndexInst) {

    switch partn := indexInst.Pc.(type) {
    case *c.KeyPartitionContainer:

        //Right now fill only the SinglePartition, as that is the only
        //partition structure supported
        partnDefn := partn.GetAllPartitions()

        //TODO move this to indexer init. These addresses cannot change.
        //Better to get these once and store.
        cinfo.Lock()
        defer cinfo.Unlock()

        err := cinfo.Fetch()
        c.CrashOnError(err)

        nid := cinfo.GetCurrentNode()

        streamMaintAddr, err := cinfo.GetServiceAddress(nid, "indexStreamMaint")
        c.CrashOnError(err)

        streamInitAddr, err := cinfo.GetServiceAddress(nid, "indexStreamInit")
        c.CrashOnError(err)

        streamCatchupAddr, err := cinfo.GetServiceAddress(nid, "indexStreamCatchup")
        c.CrashOnError(err)

        var endpoints []string
        for _, p := range partnDefn {
            for _, e := range p.Endpoints() {
                //Set the right endpoint based on streamId
                switch streamId {
                case c.MAINT_STREAM:
                    e = c.Endpoint(streamMaintAddr)
                case c.CATCHUP_STREAM:
                    e = c.Endpoint(streamCatchupAddr)
                case c.INIT_STREAM:
                    e = c.Endpoint(streamInitAddr)
                }
                endpoints = append(endpoints, string(e))
            }
        }

        protoInst.SinglePartn = &protobuf.SinglePartition{
            Endpoints: endpoints,
        }
    }
}
Example 9: OnIndexDelete
func (meta *metaNotifier) OnIndexDelete(defnId common.IndexDefnId, bucket string) error {

    logging.Infof("clustMgrAgent::OnIndexDelete Notification "+
        "Received for Drop IndexId %v", defnId)

    respCh := make(MsgChannel)

    //Treat DefnId as InstId for now
    meta.adminCh <- &MsgDropIndex{mType: CLUST_MGR_DROP_INDEX_DDL,
        indexInstId: common.IndexInstId(defnId),
        respCh:      respCh,
        bucket:      bucket}

    //wait for response
    if res, ok := <-respCh; ok {
        switch res.GetMsgType() {

        case MSG_SUCCESS:
            logging.Infof("clustMgrAgent::OnIndexDelete Success "+
                "for Drop IndexId %v", defnId)
            return nil

        case MSG_ERROR:
            logging.Errorf("clustMgrAgent::OnIndexDelete Error "+
                "for Drop IndexId %v. Error %v", defnId, res)
            err := res.(*MsgError).GetError()
            return err.cause

        default:
            logging.Fatalf("clustMgrAgent::OnIndexDelete Unknown Response "+
                "Received for Drop IndexId %v. Response %v", defnId, res)
            common.CrashOnError(errors.New("Unknown Response"))
        }
    } else {
        logging.Fatalf("clustMgrAgent::OnIndexDelete Unexpected Channel Close "+
            "for Drop IndexId %v", defnId)
        common.CrashOnError(errors.New("Unknown Response"))
    }

    return nil
}
Example 10: handleIndexerReady
func (c *clustMgrAgent) handleIndexerReady(cmd Message) {

    logging.Debugf("ClustMgr:handleIndexerReady %v", cmd)

    err := c.mgr.NotifyIndexerReady()
    common.CrashOnError(err)

    c.supvCmdch <- &MsgSuccess{}
}
Example 11: newIndexEntry
func (s *fdbSnapshot) newIndexEntry(b []byte) IndexEntry {
    var entry IndexEntry
    var err error

    if s.slice.isPrimary {
        entry, err = BytesToPrimaryIndexEntry(b)
    } else {
        entry, err = BytesToSecondaryIndexEntry(b)
    }
    common.CrashOnError(err)
    return entry
}
Example 12: handleDeleteBucket
func (c *clustMgrAgent) handleDeleteBucket(cmd Message) {

    logging.Debugf("ClustMgr:handleDeleteBucket %v", cmd)

    bucket := cmd.(*MsgClustMgrUpdate).GetBucket()
    streamId := cmd.(*MsgClustMgrUpdate).GetStreamId()

    err := c.mgr.DeleteIndexForBucket(bucket, streamId)
    common.CrashOnError(err)

    c.supvCmdch <- &MsgSuccess{}
}
Example 13: handleSupervisorCommands
//handleSupervisorCommands handles the messages from Supervisor.
//Each operation acquires the mutex to make itself atomic.
func (m *mutationMgr) handleSupervisorCommands(cmd Message) {
    switch cmd.GetMsgType() {
    case OPEN_STREAM:
        m.handleOpenStream(cmd)
    case ADD_INDEX_LIST_TO_STREAM:
        m.handleAddIndexListToStream(cmd)
    case REMOVE_INDEX_LIST_FROM_STREAM:
        m.handleRemoveIndexListFromStream(cmd)
    case REMOVE_BUCKET_FROM_STREAM:
        m.handleRemoveBucketFromStream(cmd)
    case CLOSE_STREAM:
        m.handleCloseStream(cmd)
    case CLEANUP_STREAM:
        m.handleCleanupStream(cmd)
    case MUT_MGR_PERSIST_MUTATION_QUEUE:
        m.handlePersistMutationQueue(cmd)
    case MUT_MGR_DRAIN_MUTATION_QUEUE:
        m.handleDrainMutationQueue(cmd)
    case MUT_MGR_GET_MUTATION_QUEUE_HWT:
        m.handleGetMutationQueueHWT(cmd)
    case MUT_MGR_GET_MUTATION_QUEUE_LWT:
        m.handleGetMutationQueueLWT(cmd)
    case UPDATE_INDEX_INSTANCE_MAP:
        m.handleUpdateIndexInstMap(cmd)
    case UPDATE_INDEX_PARTITION_MAP:
        m.handleUpdateIndexPartnMap(cmd)
    case MUT_MGR_ABORT_PERSIST:
        m.handleAbortPersist(cmd)
    case CONFIG_SETTINGS_UPDATE:
        m.handleConfigUpdate(cmd)
    default:
        logging.Fatalf("MutationMgr::handleSupervisorCommands Received Unknown Command %v", cmd)
        common.CrashOnError(errors.New("Unknown Command On Supervisor Channel"))
    }
}
Example 14: main
func main() {
    platform.HideConsole(true)
    defer platform.HideConsole(false)
    common.SeedProcess()

    logging.Infof("Indexer started with command line: %v\n", os.Args)
    flag.Parse()

    logging.SetLogLevel(logging.Level(*logLevel))
    forestdb.Log = &logging.SystemLogger

    // setup cbauth
    if *auth != "" {
        up := strings.Split(*auth, ":")
        logging.Tracef("Initializing cbauth with user %v for cluster %v\n", up[0], *cluster)
        if _, err := cbauth.InternalRetryDefaultInit(*cluster, up[0], up[1]); err != nil {
            logging.Fatalf("Failed to initialize cbauth: %s", err)
        }
    }

    go platform.DumpOnSignal()
    go common.ExitOnStdinClose()

    config := common.SystemConfig
    config.SetValue("indexer.clusterAddr", *cluster)
    config.SetValue("indexer.numVbuckets", *numVbuckets)
    config.SetValue("indexer.enableManager", *enableManager)
    config.SetValue("indexer.adminPort", *adminPort)
    config.SetValue("indexer.scanPort", *scanPort)
    config.SetValue("indexer.httpPort", *httpPort)
    config.SetValue("indexer.streamInitPort", *streamInitPort)
    config.SetValue("indexer.streamCatchupPort", *streamCatchupPort)
    config.SetValue("indexer.streamMaintPort", *streamMaintPort)
    config.SetValue("indexer.storage_dir", *storageDir)

    storage_dir := config["indexer.storage_dir"].String()
    if err := os.MkdirAll(storage_dir, 0755); err != nil {
        common.CrashOnError(err)
    }

    _, msg := indexer.NewIndexer(config)
    if msg.GetMsgType() != indexer.MSG_SUCCESS {
        logging.Warnf("Indexer Failure to Init %v", msg)
    }

    logging.Infof("Indexer exiting normally\n")
}
Example 15: adjustNonSnapAlignedVbs
//If a snapshot marker has been received but no mutation for that snapshot,
//the repairTs seqno will be outside the snapshot marker range and
//DCP will refuse to accept such a seqno for restart. Such VBs need to
//use lastFlushTs or restartTs.
func (ss *StreamState) adjustNonSnapAlignedVbs(repairTs *common.TsVbuuid,
    streamId common.StreamId, bucket string) {

    for _, vbno := range repairTs.GetVbnos() {
        seqno := repairTs.Seqnos[vbno]
        snapBegin := repairTs.Snapshots[vbno][0]
        snapEnd := repairTs.Snapshots[vbno][1]
        if !(seqno >= snapBegin && seqno <= snapEnd) {

            // First, use the last flush TS seqno if available
            if fts, ok := ss.streamBucketLastFlushedTsMap[streamId][bucket]; ok && fts != nil {
                repairTs.Snapshots[vbno][0] = fts.Snapshots[vbno][0]
                repairTs.Snapshots[vbno][1] = fts.Snapshots[vbno][1]
                repairTs.Seqnos[vbno] = fts.Seqnos[vbno]
                snapBegin = repairTs.Snapshots[vbno][0]
                snapEnd = repairTs.Snapshots[vbno][1]
            }

            // If the last flush TS is still out-of-bound, use the last snap-aligned flushed TS if available
            if !(repairTs.Seqnos[vbno] >= snapBegin && repairTs.Seqnos[vbno] <= snapEnd) {
                if fts, ok := ss.streamBucketLastSnapAlignFlushedTsMap[streamId][bucket]; ok && fts != nil {
                    repairTs.Snapshots[vbno][0] = fts.Snapshots[vbno][0]
                    repairTs.Snapshots[vbno][1] = fts.Snapshots[vbno][1]
                    repairTs.Seqnos[vbno] = fts.Seqnos[vbno]
                    snapBegin = repairTs.Snapshots[vbno][0]
                    snapEnd = repairTs.Snapshots[vbno][1]
                }
            }

            // If the last snap-aligned flushed TS is not available either, then use restartTs
            if !(repairTs.Seqnos[vbno] >= snapBegin && repairTs.Seqnos[vbno] <= snapEnd) {
                if rts, ok := ss.streamBucketRestartTsMap[streamId][bucket]; ok && rts != nil {
                    //if no flush has been done yet, use restart TS
                    repairTs.Snapshots[vbno][0] = rts.Snapshots[vbno][0]
                    repairTs.Snapshots[vbno][1] = rts.Snapshots[vbno][1]
                    repairTs.Seqnos[vbno] = rts.Seqnos[vbno]
                    snapBegin = repairTs.Snapshots[vbno][0]
                    snapEnd = repairTs.Snapshots[vbno][1]
                }
            }

            //if the seqno is still not within the snapshot range, it is likely a bug in state management
            if !(repairTs.Seqnos[vbno] >= snapBegin && repairTs.Seqnos[vbno] <= snapEnd) {
                common.CrashOnError(errors.New("No Valid Restart Seqno Found"))
            }
        }
    }
}