本文整理汇总了Golang中github.com/libopenstorage/gossip/types.NodeId函数的典型用法代码示例。如果您正苦于以下问题:Golang NodeId函数的具体用法?Golang NodeId怎么用?Golang NodeId使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NodeId函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: TestGossiperAddRemoveGetNode
// TestGossiperAddRemoveGetNode exercises the gossiper node-management API:
// fresh adds succeed, a duplicate add fails, GetNodes reflects every added
// peer, removals succeed, and removing an unknown node fails.
func TestGossiperAddRemoveGetNode(t *testing.T) {
	printTestInfo()

	g := NewGossiperImpl("0.0.0.0:9010", "0")
	peers := []string{
		"0.0.0.0:90011",
		"0.0.0.0:90012",
		"0.0.0.0:90013",
		"0.0.0.0:90014",
	}

	// test add nodes: ids are "1".."4", one per peer address.
	for idx, addr := range peers {
		if err := g.AddNode(addr, types.NodeId(strconv.Itoa(idx+1))); err != nil {
			t.Error("Error adding new node")
		}
	}

	// try adding existing node: must be rejected.
	if err := g.AddNode(peers[0], types.NodeId(strconv.Itoa(1))); err == nil {
		t.Error("Duplicate node addition did not fail")
	}

	// check the nodelist is same
	peerNodes := g.GetNodes()
	if len(peerNodes) != len(peers) {
		t.Error("Peer nodes len does not match added nodes, got: ",
			peerNodes, " expected: ", peers)
	}
	seen := make(map[string]bool, len(peerNodes))
	for _, p := range peerNodes {
		seen[p] = true
	}
	for _, addr := range peers {
		if !seen[addr] {
			t.Error("Peer nodes does not have added node: ", addr)
		}
	}

	// test remove nodes
	for _, addr := range peers {
		if err := g.RemoveNode(addr); err != nil {
			t.Error("Error removing new node")
		}
	}

	// try removing non-existing node: must be rejected.
	if err := g.RemoveNode("0.0.0.0:9020"); err == nil {
		t.Error("Non-existing node removal did not fail")
	}

	g.Stop()
}
示例2: TestGossipStoreSubset
// TestGossipStoreSubset verifies Subset behavior: an empty store returns an
// empty map for both empty and non-empty node lists, and a populated store
// returns only the requested node ids it actually knows about, with
// matching node info.
func TestGossipStoreSubset(t *testing.T) {
	printTestInfo()

	g := NewGossipStore(ID)

	log.Info("Testing: empty store and empty nodelist")
	// empty store and empty nodelist and non-empty nodelist
	diff := make(types.StoreNodes, 0)
	sv := g.Subset(diff)
	if len(sv) != 0 {
		// FIX: message typo "Emtpy" -> "Empty".
		t.Error("Empty subset expected, got: ", sv)
	}

	nodeLen := 10
	for i := 0; i < nodeLen*2; i++ {
		diff = append(diff, types.NodeId(strconv.Itoa(i)))
	}
	log.Info("Testing: empty store and non-empty nodelist")
	sv = g.Subset(diff)
	if len(sv) != 0 {
		t.Error("Empty subset expected, got: ", sv)
	}

	// store and diff asks for 20 nodes but store
	// has only a subset of it, as well as some keys
	// it does not know about
	keyList := []types.StoreKey{"key1", "key2", "key3"}
	g.nodeMap = make(types.NodeInfoMap)
	for _, key := range keyList {
		fillUpNodeInfoMap(g.nodeMap, key, nodeLen)
	}

	diff = make(types.StoreNodes, 0)
	for i := 0; i < nodeLen; i++ {
		// Request only the even node ids; half of them exist in the store.
		diff = append(diff, types.NodeId(strconv.Itoa(2*i)))
	}
	// FIX: the log message said "empty store" although the store was just
	// filled above; it now reflects the case actually under test.
	log.Info("Testing: non-empty store and non-empty nodelist")
	sv = g.Subset(diff)
	if len(sv) != nodeLen/2 {
		t.Error("Subset has more keys than requested: ", sv)
	}
	for id, info := range sv {
		gInfo, ok := g.nodeMap[id]
		if !ok {
			t.Error("Subset returned id which was not originally present ", id)
		}
		if !compareNodeInfo(info, gInfo) {
			t.Error("Node info does not match, d:", info, " o:", gInfo)
		}
	}
}
示例3: verifyGossiperEquality
// verifyGossiperEquality asserts that two gossipers have converged to the
// same state: identical store-key sets and, for every key, identical
// per-node ids on both sides.
func verifyGossiperEquality(g1 *GossiperImpl, g2 *GossiperImpl, t *testing.T) {
	// check for the equality
	g1Keys := g1.GetStoreKeys()
	g2Keys := g2.GetStoreKeys()
	if len(g1Keys) != len(g2Keys) {
		t.Error("Keys mismatch, g1: ", g1Keys, " g2:", g2Keys)
	}

	for _, key := range g1Keys {
		g1Values := g1.GetStoreKeyValue(key)
		// BUG FIX: this read g1.GetStoreKeyValue(key), comparing g1
		// against itself and making the equality check vacuous.
		g2Values := g2.GetStoreKeyValue(key)
		t.Log("g1Values: ", g1Values)
		t.Log("g2Values: ", g2Values)
		if len(g1Values) != len(g2Values) {
			t.Fatal("Lens mismatch between g1 and g2 values")
		}

		for i := 0; i < len(g1Values); i++ {
			id := types.NodeId(strconv.Itoa(i))
			if g1Values[id].Id != g2Values[id].Id {
				// FIX: message typo "mismtach" -> "mismatch".
				t.Error("Values mismatch between g1 and g2, g1:\n",
					g1Values[id].Id, "\ng2:", g2Values[id].Id)
			}
		}
	}
}
示例4: fillUpNodeInfo
// fillUpNodeInfo populates node with a deterministic id derived from i, the
// current timestamp, UP status, and a store map carrying the node id under
// both the CPU- and MEMORY-prefixed variants of key.
func fillUpNodeInfo(node *types.NodeInfo, key types.StoreKey, i int) {
	id := types.NodeId(strconv.Itoa(i))
	node.Id = id
	node.LastUpdateTs = time.Now()
	node.Status = types.NODE_STATUS_UP
	node.Value = types.StoreMap{
		types.StoreKey(CPU + key):    id,
		types.StoreKey(MEMORY + key): id,
	}
}
示例5: joinCluster
// Initialize node and alert listeners that we are joining the cluster.
//
// joinCluster runs a two-phase listener handshake:
//  1. Init is called on every listener only when this node is NOT already
//     in the cluster map (exist == false); any failure marks the node
//     errored, rolls back via cleanupInit, and jumps to the error return.
//  2. Join is called on every listener; on failure a brand-new node is
//     rolled back, an existing one is not.
// On success, gossip connections are opened to all other known node
// entries, skipping entries that share our management IP (stale records).
// Returns the first listener error encountered, or nil.
func (c *ClusterManager) joinCluster(db *Database, self *api.Node, exist bool) error {
var err error
// If I am already in the cluster map, don't add me again.
if exist {
goto found
}
// Alert all listeners that we are a new node joining an existing cluster.
for e := c.listeners.Front(); e != nil; e = e.Next() {
err = e.Value.(ClusterListener).Init(self, db)
if err != nil {
self.Status = api.Status_STATUS_ERROR
dlog.Warnf("Failed to initialize Init %s: %v",
e.Value.(ClusterListener).String(), err)
// Roll back any state Init created before failing.
c.cleanupInit(db, self)
goto done
}
}
found:
// Alert all listeners that we are joining the cluster.
for e := c.listeners.Front(); e != nil; e = e.Next() {
err = e.Value.(ClusterListener).Join(self, db)
if err != nil {
self.Status = api.Status_STATUS_ERROR
dlog.Warnf("Failed to initialize Join %s: %v",
e.Value.(ClusterListener).String(), err)
// Only roll back state that this call created (new node).
if exist == false {
c.cleanupInit(db, self)
}
goto done
}
}
for id, n := range db.NodeEntries {
if id != c.config.NodeId {
// Check to see if the IP is the same. If it is, then we have a stale entry.
if n.MgmtIp == self.MgmtIp {
dlog.Warnf("Warning, Detected node %s with the same IP %s in the database. Will not connect to this node.",
id, n.MgmtIp)
} else {
// Gossip with this node.
// NOTE(review): AddNode's error is ignored and gossip port 9002 is
// hard-coded — confirm both are intentional.
dlog.Infof("Connecting to node %s with IP %s.", id, n.MgmtIp)
c.gossip.AddNode(n.MgmtIp+":9002", types.NodeId(id))
}
}
}
done:
return err
}
示例6: TestTransportSendAndRcvData
// TestTransportSendAndRcvData starts a receiving message channel, sends a
// small TestData payload to it over a second channel, and verifies the
// received map has the same number of entries as the one sent.
func TestTransportSendAndRcvData(t *testing.T) {
	printTestInfo()

	time.Sleep(10 * time.Second)

	data1 := &TestData{}
	data2 := &TestData{}
	data1.Data = make(map[types.StoreKey]types.NodeInfo)
	data2.Data = make(map[types.StoreKey]types.NodeInfo)

	// Receiver side: decode whatever arrives into data2.
	var handler types.OnMessageRcv = func(c types.MessageChannel) {
		err := c.RcvData(&data2)
		if err != nil {
			t.Error("Error receiving data: ", err)
		} else {
			t.Log("Done receiving")
		}
	}

	ipString := "0.0.0.0:19002"
	r := NewRunnableMessageChannel(ipString, handler)
	go r.RunOnRcvData()
	time.Sleep(1 * time.Second)

	keyList := []types.StoreKey{"key1", "key2"}
	for i, key := range keyList {
		var node types.NodeInfo
		// BUG FIX: was types.NodeId(i) — an int-to-string conversion that
		// yields the Unicode code point ("\x00", "\x01"), not "0"/"1".
		// Matches the strconv.Itoa convention used elsewhere in the tests.
		node.Id = types.NodeId(strconv.Itoa(i))
		node.Value = make(types.StoreMap)
		node.Value[key] = "some data"
		data1.Data[key] = node
	}

	s := NewMessageChannel(ipString)
	if s == nil {
		t.Fatal("Error creating send channel, failing test")
	}
	go s.SendData(&data1)
	time.Sleep(1 * time.Second)
	s.Close()
	r.Close()

	if len(data1.Data) != len(data2.Data) {
		t.Error("Sent and rcvd messages mismatch, sent: ", data1,
			" got: ", data2)
	}
}
示例7: TestGossipStoreGetStoreKeyValue
// TestGossipStoreGetStoreKeyValue verifies GetStoreKeyValue on an empty
// store, then on a store where some node ids have had the queried key
// deleted ("holes"): only the nodes still carrying the key must be
// returned, with consistent id, status and value.
func TestGossipStoreGetStoreKeyValue(t *testing.T) {
	printTestInfo()

	// Case: empty store
	// Case: key absent
	g := NewGossipStore(ID)

	keyList := []types.StoreKey{"key1", "key2"}

	nodeInfoMap := g.GetStoreKeyValue(keyList[0])
	if len(nodeInfoMap) != 0 {
		t.Error("Expected empty node info list, got: ", nodeInfoMap)
	}

	// Case: key present with nodes with holes in node ids
	fillUpNodeInfoMap(g.nodeMap, keyList[0], 6)
	if len(g.nodeMap) != 6 {
		t.Error("Failed to fillup node info map properly, got: ",
			g.nodeMap)
	}
	keyCheck := types.StoreKey(CPU + keyList[0])
	// Punch holes: the even-numbered nodes lose the key.
	delete(g.nodeMap["0"].Value, keyCheck)
	delete(g.nodeMap["2"].Value, keyCheck)
	delete(g.nodeMap["4"].Value, keyCheck)

	nodeInfoMap = g.GetStoreKeyValue(keyCheck)
	if len(nodeInfoMap) != 3 {
		// BUG FIX: the message claimed "atleast 6 elements" but the check
		// is for exactly 3 (6 nodes minus the 3 holes punched above).
		t.Error("Expected list with 3 elements, got: ", nodeInfoMap)
	}
	for i := 0; i < len(nodeInfoMap); i++ {
		id := types.NodeId(strconv.Itoa(i))
		if i%2 == 0 {
			// Even ids had the key deleted, so they must be absent.
			if _, ok := nodeInfoMap[id]; ok {
				t.Error("No node expected, got: ", nodeInfoMap[id])
			}
			continue
		}
		infoMap := nodeInfoMap[id].Value.(types.NodeId)
		if nodeInfoMap[id].Id != id ||
			nodeInfoMap[id].Status != types.NODE_STATUS_UP ||
			infoMap != id {
			t.Error("Invalid node content received, got: ", nodeInfoMap[id])
		}
	}
}
示例8: watchDB
// watchDB is the kvdb watch callback fired on cluster-database changes: it
// reloads the database, records the latest cluster size, and refreshes the
// gossip peer address for every node other than this one. It always returns
// nil so the watch stays registered.
func (c *ClusterManager) watchDB(key string, opaque interface{},
	kvp *kvdb.KVPair, err error) error {
	db, readErr := readDatabase()
	if readErr != nil {
		dlog.Warnln("Failed to read database after update ", readErr)
		return nil
	}

	// The only value we rely on during an update is the cluster size.
	c.size = db.Size

	for nodeId, entry := range db.NodeEntries {
		if nodeId == c.config.NodeId {
			continue
		}
		c.gossip.UpdateNode(entry.MgmtIp+":9002", types.NodeId(nodeId))
	}

	return nil
}
示例9: Start
// Start boots the cluster manager: it registers the gossip protocol, takes
// the cluster-wide kvdb lock, then either initializes a brand-new cluster
// (database status INIT) or joins an existing healthy one, persisting the
// updated cluster database before releasing the lock. It finishes by
// launching the heartbeat goroutine. Unexpected database states panic.
func (c *ClusterManager) Start() error {
	log.Info("Cluster manager starting...")

	kvdb := kv.Instance()

	// Start the gossip protocol.
	// XXX Make the port configurable.
	gob.Register(api.Node{})
	c.g = gossip.New("0.0.0.0:9002", gossiptypes.NodeId(c.config.NodeId))
	c.g.SetGossipInterval(2 * time.Second)

	// Serialize cluster init/join across nodes. 60 is the lock TTL
	// (presumably seconds — confirm against the kvdb implementation).
	kvlock, err := kvdb.Lock("cluster/lock", 60)
	if err != nil {
		log.Panic("Fatal, Unable to obtain cluster lock.", err)
	}

	db, err := readDatabase()
	if err != nil {
		log.Panic(err)
	}

	if db.Status == api.StatusInit {
		log.Info("Will initialize a new cluster.")

		c.status = api.StatusOk
		db.Status = api.StatusOk
		self, _ := c.initNode(&db)

		err = c.initCluster(&db, self, false)
		if err != nil {
			kvdb.Unlock(kvlock)
			log.Error("Failed to initialize the cluster.", err)
			log.Panic(err)
		}

		// Update the new state of the cluster in the KV Database
		err = writeDatabase(&db)
		if err != nil {
			// BUG FIX: this was log.Errorf with no format verbs, which
			// mangled the message and dropped err from the output.
			log.Errorf("Failed to save the database: %v", err)
			log.Panic(err)
		}

		err = kvdb.Unlock(kvlock)
		if err != nil {
			log.Panic("Fatal, unable to unlock cluster... Did something take too long to initialize?", err)
		}
	} else if db.Status&api.StatusOk > 0 {
		log.Info("Cluster state is OK... Joining the cluster.")

		c.status = api.StatusOk
		self, exist := c.initNode(&db)

		err = c.joinCluster(&db, self, exist)
		if err != nil {
			kvdb.Unlock(kvlock)
			log.Panic(err)
		}

		err = writeDatabase(&db)
		if err != nil {
			log.Panic(err)
		}

		err = kvdb.Unlock(kvlock)
		if err != nil {
			log.Panic("Fatal, unable to unlock cluster... Did something take too long to initialize?", err)
		}
	} else {
		kvdb.Unlock(kvlock)
		err = errors.New("Fatal, Cluster is in an unexpected state.")
		log.Panic(err)
	}

	// Start heartbeating to other nodes.
	go c.heartBeat()

	return nil
}
示例10: clearKey
// clearKey removes the CPU- and MEMORY-prefixed entries derived from key
// from the store map of node id within nodes.
func clearKey(nodes types.NodeInfoMap, key types.StoreKey, id int) {
	info := nodes[types.NodeId(strconv.Itoa(id))]
	prefixed := []types.StoreKey{
		types.StoreKey(CPU + key),
		types.StoreKey(MEMORY + key),
	}
	for _, k := range prefixed {
		delete(info.Value, k)
	}
}
示例11: TestTransportFailures
// TestTransportFailures exercises the message-channel error paths: a sender
// that closes without sending anything, sending to an already-closed
// receiver, and sending data that gob cannot marshal.
func TestTransportFailures(t *testing.T) {
	printTestInfo()

	time.Sleep(10 * time.Second)

	data1 := &TestData{}
	data2 := &TestData{}
	data1.Data = make(map[types.StoreKey]types.NodeInfo)
	data2.Data = make(map[types.StoreKey]types.NodeInfo)

	// Every receive in this test is expected to fail.
	var handler types.OnMessageRcv = func(c types.MessageChannel) {
		err := c.RcvData(&data2)
		if err == nil {
			t.Error("Did not receive expected error")
		}
		return
	}

	fmt.Println("Close without sending data")
	ipString := "0.0.0.0:17016"
	r := NewRunnableMessageChannel(ipString, handler)
	go r.RunOnRcvData()
	time.Sleep(5 * time.Second)

	keyList := []types.StoreKey{"key1", "key2"}
	for i, key := range keyList {
		var node types.NodeInfo
		// BUG FIX: was types.NodeId(i) — an int-to-string conversion that
		// yields the Unicode code point ("\x00", "\x01"), not "0"/"1".
		node.Id = types.NodeId(strconv.Itoa(i))
		node.Value = make(types.StoreMap)
		node.Value[key] = "some data"
		data1.Data[key] = node
	}

	// close the channel without sending any message
	s := NewMessageChannel(ipString)
	if s == nil {
		t.Fatal("Error creating send channel, failing test")
	}
	time.Sleep(10 * time.Millisecond)
	s.Close()
	time.Sleep(10 * time.Millisecond)
	r.Close()
	time.Sleep(10 * time.Millisecond)

	fmt.Println("Close the channel and then send")
	// open and then close the channel
	ipString = "0.0.0.0:17617"
	r = NewRunnableMessageChannel(ipString, handler)
	go r.RunOnRcvData()
	time.Sleep(1 * time.Second)
	r.Close()
	time.Sleep(2 * time.Second)

	// try sending data to closed end
	s = NewMessageChannel(ipString)
	if s != nil {
		t.Error("Error, expected nil sender")
	}

	fmt.Println("Send non-marshalable data")
	// try sending non-marshalable data
	ipString = "0.0.0.0:17418"
	r = NewRunnableMessageChannel(ipString, handler)
	go r.RunOnRcvData()
	time.Sleep(1 * time.Second)
	s = NewMessageChannel(ipString)
	if s == nil {
		t.Fatal("Error creating send channel, failing test")
	}
	nonMarshalData := make(map[types.StoreKey]map[types.StoreKey]types.NodeInfoMap)
	err := s.SendData(nonMarshalData)
	// BUG FIX: this check was inverted (err != nil): the test error fired
	// exactly when the expected marshaling error occurred. SendData MUST
	// fail here, so we flag the case where no error is returned.
	if err == nil {
		t.Error("Expected error sending non-marshalable data")
	}
	s.Close()
	r.Close()
}
示例12: TestGossiperOneNodeNeverGossips
// TestGossiperOneNodeNeverGossips brings up every node except node 0 and
// verifies convergence: immediately after the updates node 0 must not yet
// be marked DOWN, and after the gossip interval elapses every live
// gossiper must mark node 0 DOWN and all live nodes UP.
func TestGossiperOneNodeNeverGossips(t *testing.T) {
	printTestInfo()

	nodes := []string{"0.0.0.0:9222", "0.0.0.0:9223",
		"0.0.0.0:9224"}
	rand.Seed(time.Now().UnixNano())

	gossipers := make(map[int]*GossiperImpl)
	for i, nodeId := range nodes {
		if i == 0 {
			// node 0 never comes up
			continue
		}
		id := types.NodeId(strconv.Itoa(i))
		g := NewGossiperImpl(nodeId, id)
		// Randomize intervals so the gossipers do not run in lock-step.
		g.SetGossipInterval(time.Duration(200+rand.Intn(200)) * time.Millisecond)
		for j, peer := range nodes {
			if i == j {
				continue
			}
			// BUG FIX: was types.NodeId(j) — an int-to-string conversion
			// producing code points ("\x00", ...) instead of "0", "1", ...
			// which breaks the strconv.Atoi round-trip performed below.
			g.AddNode(peer, types.NodeId(strconv.Itoa(j)))
		}
		gossipers[i] = g
	}

	// each node must mark node 0 as down
	key := types.StoreKey("somekey")
	value := "someValue"
	for i, g := range gossipers {
		g.UpdateSelf(key, value+strconv.Itoa(i))
	}

	// Before the gossip interval elapses node 0 must not be DOWN yet.
	for i, g := range gossipers {
		res := g.GetStoreKeyValue(key)
		for nodeId, n := range res {
			if nodeId != n.Id {
				t.Error("Gossiper ", i, "Id does not match ",
					nodeId, " n:", n.Id)
			}
			nid, ok := strconv.Atoi(string(nodeId))
			if ok != nil {
				t.Error("Failed to convert node to id ", nodeId, " n.Id", n.Id)
			}
			if nid == 0 {
				if n.Status == types.NODE_STATUS_DOWN {
					t.Error("Gossiper ", i,
						"Expected node status not to be down: ", nodeId, " n:", n)
				}
			}
		}
	}

	time.Sleep(2 * time.Second)

	// After convergence node 0 is DOWN and every live node is UP.
	for i, g := range gossipers {
		res := g.GetStoreKeyValue(key)
		for nodeId, n := range res {
			if nodeId != n.Id {
				t.Error("Gossiper ", i, "Id does not match ",
					nodeId, " n:", n.Id)
			}
			nid, ok := strconv.Atoi(string(nodeId))
			if ok != nil {
				t.Error("Failed to convert node to id ", nodeId, " n.Id", n.Id)
			}
			if nid == 0 {
				if n.Status != types.NODE_STATUS_DOWN {
					t.Error("Gossiper ", i,
						"Expected node status to be down: ", nodeId, " n:", n)
				}
			} else {
				if n.Status != types.NODE_STATUS_UP {
					t.Error("Gossiper ", i, "Expected node to be up: ", nodeId,
						" n:", n)
				}
			}
		}
	}

	for _, g := range gossipers {
		g.Stop()
	}
}
示例13: Start
// Start brings the cluster manager online: it seeds this node's self
// state, registers the gossip protocol, and — while holding the cluster
// kvdb lock — either bootstraps a new cluster (StatusInit) or joins an
// existing healthy one, persisting the updated cluster database. It then
// starts gossip and the heartbeat goroutine. Returns an error for
// init/join/persist failures; lock and read failures panic.
func (c *ClusterManager) Start() error {
logrus.Info("Cluster manager starting...")
c.gEnabled = true
c.selfNode = api.Node{}
// Generation number is derived from boot time; gossip uses it to tell
// node incarnations apart.
c.selfNode.GenNumber = uint64(time.Now().UnixNano())
c.selfNode.Id = c.config.NodeId
c.selfNode.Status = api.StatusOk
// NOTE(review): externalIp's error is discarded — the node may start
// with an empty IP; confirm this is intentional.
c.selfNode.Ip, _ = externalIp(&c.config)
c.selfNode.NodeData = make(map[string]interface{})
c.system = systemutils.New()
// Start the gossip protocol.
// XXX Make the port configurable.
gob.Register(api.Node{})
c.g = gossip.New("0.0.0.0:9002", types.NodeId(c.config.NodeId),
c.selfNode.GenNumber)
c.g.SetGossipInterval(2 * time.Second)
kvdb := kvdb.Instance()
// Serialize cluster init/join across nodes; 60 is the lock TTL
// (presumably seconds — confirm against the kvdb implementation).
kvlock, err := kvdb.Lock("cluster/lock", 60)
if err != nil {
logrus.Panic("Fatal, Unable to obtain cluster lock.", err)
}
// Released on every return path below.
defer kvdb.Unlock(kvlock)
db, err := readDatabase()
if err != nil {
logrus.Panic(err)
}
if db.Status == api.StatusInit {
logrus.Info("Will initialize a new cluster.")
c.status = api.StatusOk
db.Status = api.StatusOk
// First node up: create our own entry and bootstrap the cluster.
self, _ := c.initNode(&db)
err = c.initCluster(&db, self, false)
if err != nil {
logrus.Error("Failed to initialize the cluster.", err)
return err
}
// Update the new state of the cluster in the KV Database
err := writeDatabase(&db)
if err != nil {
logrus.Error("Failed to save the database.", err)
return err
}
} else if db.Status&api.StatusOk > 0 {
logrus.Info("Cluster state is OK... Joining the cluster.")
c.status = api.StatusOk
self, exist := c.initNode(&db)
err = c.joinCluster(&db, self, exist)
if err != nil {
logrus.Error("Failed to join cluster.", err)
return err
}
err := writeDatabase(&db)
if err != nil {
return err
}
} else {
return errors.New("Fatal, Cluster is in an unexpected state.")
}
// Start heartbeating to other nodes.
c.g.Start()
go c.heartBeat()
return nil
}
示例14: heartBeat
func (c *ClusterManager) heartBeat() {
gossipStoreKey := types.StoreKey(heartbeatKey + c.config.ClusterId)
lastUpdateTs := time.Now()
for {
node := c.getCurrentState()
c.nodeCache[node.Id] = *node
currTime := time.Now()
if currTime.Sub(lastUpdateTs) > 10*time.Second {
logrus.Warn("No gossip update for 10 seconds")
}
c.g.UpdateSelf(gossipStoreKey, *node)
lastUpdateTs = currTime
// Process heartbeats from other nodes...
gossipValues := c.g.GetStoreKeyValue(gossipStoreKey)
for id, nodeInfo := range gossipValues {
if id == types.NodeId(node.Id) {
continue
}
cachedNodeInfo, nodeFoundInCache := c.nodeCache[string(id)]
n := cachedNodeInfo
ok := false
if nodeInfo.Value != nil {
n, ok = nodeInfo.Value.(api.Node)
if !ok {
logrus.Error("Received a bad broadcast packet: %v", nodeInfo.Value)
continue
}
}
if nodeFoundInCache {
if n.Status != api.StatusOk {
logrus.Warn("Detected node ", n.Id, " to be unhealthy.")
for e := c.listeners.Front(); e != nil && c.gEnabled; e = e.Next() {
err := e.Value.(ClusterListener).Update(&n)
if err != nil {
logrus.Warn("Failed to notify ", e.Value.(ClusterListener).String())
}
}
delete(c.nodeCache, n.Id)
continue
} else if nodeInfo.Status == types.NODE_STATUS_DOWN {
ne := c.getLatestNodeConfig(string(id))
if ne != nil && nodeInfo.GenNumber < ne.GenNumber {
logrus.Warn("Detected stale update for node ", id,
" going down, ignoring it")
c.g.MarkNodeHasOldGen(id)
delete(c.nodeCache, cachedNodeInfo.Id)
continue
}
logrus.Warn("Detected node ", id, " to be offline due to inactivity.")
n.Status = api.StatusOffline
for e := c.listeners.Front(); e != nil && c.gEnabled; e = e.Next() {
err := e.Value.(ClusterListener).Update(&n)
if err != nil {
logrus.Warn("Failed to notify ", e.Value.(ClusterListener).String())
}
}
delete(c.nodeCache, cachedNodeInfo.Id)
} else if nodeInfo.Status == types.NODE_STATUS_DOWN_WAITING_FOR_NEW_UPDATE {
logrus.Warn("Detected node ", n.Id, " to be offline due to inactivity.")
n.Status = api.StatusOffline
for e := c.listeners.Front(); e != nil && c.gEnabled; e = e.Next() {
err := e.Value.(ClusterListener).Update(&n)
if err != nil {
logrus.Warn("Failed to notify ", e.Value.(ClusterListener).String())
}
}
delete(c.nodeCache, cachedNodeInfo.Id)
} else {
// node may be up or waiting for new update,
// no need to tell listeners as yet.
c.nodeCache[cachedNodeInfo.Id] = n
}
} else if nodeInfo.Status == types.NODE_STATUS_UP {
// A node discovered in the cluster.
logrus.Warn("Detected node ", n.Id, " to be in the cluster.")
c.nodeCache[n.Id] = n
for e := c.listeners.Front(); e != nil && c.gEnabled; e = e.Next() {
err := e.Value.(ClusterListener).Add(&n)
if err != nil {
logrus.Warn("Failed to notify ", e.Value.(ClusterListener).String())
}
}
}
}
time.Sleep(2 * time.Second)
//.........这里部分代码省略.........
示例15: Start
// Start brings the cluster manager online: it seeds this node's self
// state, registers the gossip protocol (port 9002, hard-coded), and —
// while holding the cluster kvdb lock — either bootstraps a new cluster
// (STATUS_INIT) or joins an existing healthy one, writing the updated
// database back. It then launches the heartbeat and cluster-status
// goroutines and watches the cluster DB key for changes. Returns an error
// for init/join/persist failures; lock and read failures panic.
func (c *ClusterManager) Start() error {
dlog.Infoln("Cluster manager starting...")
c.gEnabled = true
c.selfNode = api.Node{}
// Generation number is derived from boot time; gossip uses it to tell
// node incarnations apart.
c.selfNode.GenNumber = uint64(time.Now().UnixNano())
c.selfNode.Id = c.config.NodeId
c.selfNode.Status = api.Status_STATUS_OK
// NOTE(review): ExternalIp's error is discarded — the node may start
// with empty IPs; confirm this is intentional.
c.selfNode.MgmtIp, c.selfNode.DataIp, _ = ExternalIp(&c.config)
c.selfNode.NodeData = make(map[string]interface{})
c.system = systemutils.New()
// Start the gossip protocol.
// XXX Make the port configurable.
gob.Register(api.Node{})
c.gossip = gossip.New("0.0.0.0:9002", types.NodeId(c.config.NodeId),
c.selfNode.GenNumber)
c.gossip.SetGossipInterval(2 * time.Second)
kvdb := kvdb.Instance()
// Serialize cluster init/join across nodes; 60 is the lock TTL
// (presumably seconds — confirm against the kvdb implementation).
kvlock, err := kvdb.Lock(clusterLockKey, 60)
if err != nil {
dlog.Panicln("Fatal, Unable to obtain cluster lock.", err)
}
// Released on every return path below.
defer kvdb.Unlock(kvlock)
db, err := readDatabase()
if err != nil {
dlog.Panicln(err)
}
// Cluster database max size... 0 if unlimited.
c.size = db.Size
if db.Status == api.Status_STATUS_INIT {
dlog.Infoln("Will initialize a new cluster.")
c.status = api.Status_STATUS_OK
db.Status = api.Status_STATUS_OK
// First node up: create our own entry and bootstrap the cluster.
self, _ := c.initNode(&db)
err = c.initCluster(&db, self, false)
if err != nil {
dlog.Errorln("Failed to initialize the cluster.", err)
return err
}
// Update the new state of the cluster in the KV Database
err := writeDatabase(&db)
if err != nil {
dlog.Errorln("Failed to save the database.", err)
return err
}
} else if db.Status&api.Status_STATUS_OK > 0 {
dlog.Infoln("Cluster state is OK... Joining the cluster.")
c.status = api.Status_STATUS_OK
self, exist := c.initNode(&db)
err = c.joinCluster(&db, self, exist)
if err != nil {
dlog.Errorln("Failed to join cluster.", err)
return err
}
err := writeDatabase(&db)
if err != nil {
return err
}
} else {
return errors.New("Fatal, Cluster is in an unexpected state.")
}
// Start heartbeating to other nodes.
go c.startHeartBeat()
go c.updateClusterStatus()
// Re-read cluster config whenever the database key changes.
// NOTE(review): WatchKey's error return is ignored — confirm intentional.
kvdb.WatchKey(ClusterDBKey, 0, nil, c.watchDB)
return nil
}
}