This page collects typical usage examples of the Golang ClusterInfoCache.Unlock method from github.com/couchbase/indexing/secondary/common. If you have been wondering what exactly ClusterInfoCache.Unlock does, how to call it, or where it is used, the curated examples below should help. You can also explore further usage examples of the type the method belongs to, github.com/couchbase/indexing/secondary/common.ClusterInfoCache.
Three code examples of ClusterInfoCache.Unlock are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Golang examples.
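All three examples follow the same pattern: take the cache's lock, defer the Unlock so the lock is released on every exit path, call Fetch to refresh the cluster topology, and only then read from the cache. Here is a minimal sketch of that pattern, assuming an already-constructed *common.ClusterInfoCache (the withClusterInfo helper is hypothetical, not part of the library):

import (
    common "github.com/couchbase/indexing/secondary/common"
)

// withClusterInfo runs fn against a freshly fetched cluster snapshot while
// holding the cache lock. It is a hypothetical convenience wrapper around
// the Lock / defer Unlock / Fetch sequence used in the examples below.
func withClusterInfo(cinfo *common.ClusterInfoCache, fn func(*common.ClusterInfoCache) error) error {
    cinfo.Lock()         // serialize access to the shared cache
    defer cinfo.Unlock() // released even if Fetch or fn fails
    if err := cinfo.Fetch(); err != nil { // refresh topology under the lock
        return err
    }
    return fn(cinfo) // read the cache while the lock is still held
}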
Example 1: ValidateBucket
func ValidateBucket(cluster, bucket string, uuids []string) bool {
    var cinfo *common.ClusterInfoCache
    url, err := common.ClusterAuthUrl(cluster)
    if err == nil {
        cinfo, err = common.NewClusterInfoCache(url, DEFAULT_POOL)
    }
    if err != nil {
        logging.Fatalf("Indexer::Fail to init ClusterInfoCache : %v", err)
        common.CrashOnError(err)
    }

    cinfo.Lock()
    defer cinfo.Unlock()

    if err := cinfo.Fetch(); err != nil {
        logging.Errorf("Indexer::Fail to init ClusterInfoCache : %v", err)
        common.CrashOnError(err)
    }

    if nids, err := cinfo.GetNodesByBucket(bucket); err == nil && len(nids) != 0 {
        // verify UUID
        currentUUID := cinfo.GetBucketUUID(bucket)
        for _, uuid := range uuids {
            if uuid != currentUUID {
                return false
            }
        }
        return true
    } else {
        logging.Fatalf("Indexer::Error Fetching Bucket Info: %v Nids: %v", err, nids)
        return false
    }
}
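ValidateBucket returns true only when the bucket still exists on the cluster and its current UUID matches every UUID the caller recorded earlier; a mismatch indicates the bucket was deleted and recreated. A hypothetical call site (the cluster address, bucket name, and savedUUID value are placeholders):

// savedUUID would have been captured via cinfo.GetBucketUUID when the
// index was originally created; the values below are placeholders.
savedUUID := "0123456789abcdef0123456789abcdef"
if !ValidateBucket("127.0.0.1:9000", "default", []string{savedUUID}) {
    logging.Warnf("bucket default was dropped or recreated; its indexes are stale")
}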
Example 2: addPartnInfoToProtoInst
func addPartnInfoToProtoInst(cfg c.Config, cinfo *c.ClusterInfoCache,
    indexInst c.IndexInst, streamId c.StreamId, protoInst *protobuf.IndexInst) {

    switch partn := indexInst.Pc.(type) {
    case *c.KeyPartitionContainer:

        // Right now we fill only the SinglePartition, as that is the only
        // partition structure supported.
        partnDefn := partn.GetAllPartitions()

        // TODO: move this to indexer init. These addresses cannot change,
        // so it is better to fetch them once and store them.
        cinfo.Lock()
        defer cinfo.Unlock()
        err := cinfo.Fetch()
        c.CrashOnError(err)

        nid := cinfo.GetCurrentNode()

        streamMaintAddr, err := cinfo.GetServiceAddress(nid, "indexStreamMaint")
        c.CrashOnError(err)

        streamInitAddr, err := cinfo.GetServiceAddress(nid, "indexStreamInit")
        c.CrashOnError(err)

        streamCatchupAddr, err := cinfo.GetServiceAddress(nid, "indexStreamCatchup")
        c.CrashOnError(err)

        var endpoints []string
        for _, p := range partnDefn {
            for _, e := range p.Endpoints() {
                // Set the right endpoint based on streamId.
                switch streamId {
                case c.MAINT_STREAM:
                    e = c.Endpoint(streamMaintAddr)
                case c.CATCHUP_STREAM:
                    e = c.Endpoint(streamCatchupAddr)
                case c.INIT_STREAM:
                    e = c.Endpoint(streamInitAddr)
                }
                endpoints = append(endpoints, string(e))
            }
        }

        protoInst.SinglePartn = &protobuf.SinglePartition{
            Endpoints: endpoints,
        }
    }
}
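Because the Unlock is deferred, the lock is held across all three GetServiceAddress calls, so the maint, init, and catchup addresses come from one consistent snapshot of the cluster map. The per-stream selection could also be factored into a single lookup, sketched below (resolveStreamAddr is a hypothetical helper; the service keys are the same ones used above):

import (
    "fmt"

    c "github.com/couchbase/indexing/secondary/common"
)

// resolveStreamAddr returns the current node's service address for the
// given stream, fetching fresh topology under the cache lock.
func resolveStreamAddr(cinfo *c.ClusterInfoCache, streamId c.StreamId) (string, error) {
    cinfo.Lock()
    defer cinfo.Unlock()
    if err := cinfo.Fetch(); err != nil {
        return "", err
    }
    nid := cinfo.GetCurrentNode()
    switch streamId {
    case c.MAINT_STREAM:
        return cinfo.GetServiceAddress(nid, "indexStreamMaint")
    case c.CATCHUP_STREAM:
        return cinfo.GetServiceAddress(nid, "indexStreamCatchup")
    case c.INIT_STREAM:
        return cinfo.GetServiceAddress(nid, "indexStreamInit")
    default:
        return "", fmt.Errorf("unknown streamId %v", streamId)
    }
}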
Example 3: NewClustMgrAgent
func NewClustMgrAgent(supvCmdch MsgChannel, supvRespch MsgChannel, cfg common.Config) (
    ClustMgrAgent, Message) {

    // Init the clustMgrAgent struct
    c := &clustMgrAgent{
        supvCmdch:  supvCmdch,
        supvRespch: supvRespch,
        config:     cfg,
    }

    var cinfo *common.ClusterInfoCache
    url, err := common.ClusterAuthUrl(cfg["clusterAddr"].String())
    if err == nil {
        cinfo, err = common.NewClusterInfoCache(url, DEFAULT_POOL)
    }
    if err != nil {
        logging.Errorf("ClustMgrAgent::Fail to init ClusterInfoCache : %v", err)
        return nil, &MsgError{
            err: Error{code: ERROR_CLUSTER_MGR_AGENT_INIT,
                severity: FATAL,
                category: CLUSTER_MGR,
                cause:    err}}
    }

    cinfo.Lock()
    defer cinfo.Unlock()

    if err := cinfo.Fetch(); err != nil {
        logging.Errorf("ClustMgrAgent::Fail to init ClusterInfoCache : %v", err)
        return nil, &MsgError{
            err: Error{code: ERROR_CLUSTER_MGR_AGENT_INIT,
                severity: FATAL,
                category: CLUSTER_MGR,
                cause:    err}}
    }

    mgr, err := manager.NewIndexManager(cinfo, cfg)
    if err != nil {
        logging.Errorf("ClustMgrAgent::NewClustMgrAgent Error In Init %v", err)
        return nil, &MsgError{
            err: Error{code: ERROR_CLUSTER_MGR_AGENT_INIT,
                severity: FATAL,
                category: CLUSTER_MGR,
                cause:    err}}
    }
    c.mgr = mgr

    metaNotifier := NewMetaNotifier(supvRespch, cfg)
    if metaNotifier == nil {
        // err is nil here, so log a specific message rather than the stale err
        logging.Errorf("ClustMgrAgent::NewClustMgrAgent Error In Init: NewMetaNotifier returned nil")
        return nil, &MsgError{
            err: Error{code: ERROR_CLUSTER_MGR_AGENT_INIT,
                severity: FATAL,
                category: CLUSTER_MGR}}
    }

    // register with Index Manager for notification of metadata updates
    mgr.RegisterNotifier(metaNotifier)
    c.metaNotifier = metaNotifier

    // start the clustMgrAgent loop, which listens to commands from its supervisor
    go c.run()

    return c, &MsgSuccess{}
}
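On success the constructor returns the agent together with a *MsgSuccess; on failure it returns nil and a *MsgError carrying the cause. A hypothetical supervisor-side call that checks the returned message by type (the channel and config setup is a placeholder, assuming MsgChannel is a channel of Message values as used above):

supvCmdch := make(MsgChannel)
supvRespch := make(MsgChannel)
agent, msg := NewClustMgrAgent(supvCmdch, supvRespch, cfg)
if errMsg, ok := msg.(*MsgError); ok {
    // initialization failed; the embedded Error carries code, severity, and cause
    logging.Fatalf("ClustMgrAgent init failed: %v", errMsg)
    return
}
// agent is running: commands are sent on supvCmdch, responses arrive on supvRespch
_ = agent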