This article collects typical usage examples of the Golang function Lvl4 from github.com/dedis/cothority/log. If you are unsure what log.Lvl4 does or how to call it, the hand-picked code examples below should help.
Fifteen code examples of the Lvl4 function are shown below, sorted by popularity by default.
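Before the examples: log.Lvl4 only produces output when the package's debug level is at least 4. Below is a minimal sketch of raising that level; it assumes a SetDebugVisible setter as the counterpart of the DebugVisible() getter used in Example 9, so treat the exact call as an assumption rather than documented API.

package main

import (
    "github.com/dedis/cothority/log"
)

func main() {
    // LvlN calls print only when the debug level is at least N; raise it to 4
    // so the Lvl4 calls in the examples below become visible.
    // SetDebugVisible is assumed here as the setter matching DebugVisible().
    log.SetDebugVisible(4)

    log.Lvl1("shown at debug level >= 1")
    log.Lvl4("detailed trace message, shown only at debug level >= 4")
    log.Error("errors are printed regardless of the debug level")
}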
Example 1: dispatchMsgReader
func (n *TreeNodeInstance) dispatchMsgReader() {
    for {
        n.msgDispatchQueueMutex.Lock()
        if n.closing {
            log.Lvl3("Closing reader")
            n.msgDispatchQueueMutex.Unlock()
            return
        }
        if len(n.msgDispatchQueue) > 0 {
            log.Lvl4(n.Info(), "Read message and dispatching it",
                len(n.msgDispatchQueue))
            msg := n.msgDispatchQueue[0]
            n.msgDispatchQueue = n.msgDispatchQueue[1:]
            n.msgDispatchQueueMutex.Unlock()
            err := n.dispatchMsgToProtocol(msg)
            if err != nil {
                log.Error("Error while dispatching message:", err)
            }
        } else {
            n.msgDispatchQueueMutex.Unlock()
            log.Lvl4(n.Info(), "Waiting for message")
            <-n.msgDispatchQueueWait
        }
    }
}
Example 2: listen
// listen starts listening for messages coming from any host that tries to
// contact this host. If 'wait' is true, it will try to connect to itself before
// returning.
func (h *Host) listen(wait bool) {
    log.Lvl3(h.ServerIdentity.First(), "starts to listen")
    fn := func(c network.SecureConn) {
        log.Lvl3(h.workingAddress, "Accepted Connection from", c.Remote())
        // register the connection once we know it's ok
        h.registerConnection(c)
        h.handleConn(c)
    }
    go func() {
        log.Lvl4("Host listens on:", h.workingAddress)
        err := h.host.Listen(fn)
        if err != nil {
            log.Fatal("Couldn't listen on", h.workingAddress, ":", err)
        }
    }()
    if wait {
        for {
            log.Lvl4(h.ServerIdentity.First(), "checking if listener is up")
            _, err := h.Connect(h.ServerIdentity)
            if err == nil {
                log.Lvl4(h.ServerIdentity.First(), "managed to connect to itself")
                break
            }
            time.Sleep(network.WaitRetry)
        }
    }
}
Example 3: handleCommit
// handleCommit receives commit messages and signals the end once it has
// received enough of them.
func (p *Protocol) handleCommit(com *Commit) {
    if p.state != stateCommit {
        // log.Lvl3(p.Name(), "STORE handle commit packet")
        p.tempCommitMsg = append(p.tempCommitMsg, com)
        return
    }
    // finish after threshold of Commit msgs
    p.commitMsgCount++
    log.Lvl4(p.Name(), "----------------\nWe got", p.commitMsgCount,
        "COMMIT msgs and threshold is", p.threshold)
    if p.IsRoot() {
        log.Lvl4("Leader got ", p.commitMsgCount)
    }
    if p.commitMsgCount >= p.threshold {
        p.state = stateFinished
        // reset counter
        p.commitMsgCount = 0
        log.Lvl3(p.Name(), "Threshold reached: We are done... CONSENSUS")
        if p.IsRoot() && p.onDoneCB != nil {
            log.Lvl3(p.Name(), "We are root and threshold reached: return to the simulation.")
            p.onDoneCB()
            p.finish()
        }
        return
    }
}
Example 4: ProcessProtocolMsg
// ProcessProtocolMsg takes a message and puts it into a queue for later processing.
// This allows a protocol to have a backlog of messages.
func (n *TreeNodeInstance) ProcessProtocolMsg(msg *ProtocolMsg) {
    log.Lvl4(n.Info(), "Received message")
    n.msgDispatchQueueMutex.Lock()
    n.msgDispatchQueue = append(n.msgDispatchQueue, msg)
    log.Lvl4(n.Info(), "DispatchQueue-length is", len(n.msgDispatchQueue))
    if len(n.msgDispatchQueue) == 1 && len(n.msgDispatchQueueWait) == 0 {
        n.msgDispatchQueueWait <- true
    }
    n.msgDispatchQueueMutex.Unlock()
}
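Examples 1 and 4 are two halves of the same producer/consumer pattern: ProcessProtocolMsg appends to a mutex-protected queue and pings a wake-up channel only when the reader might be sleeping, while dispatchMsgReader drains the queue and blocks on that channel when it is empty. The following self-contained sketch isolates that pattern; the names (queue, push, reader, wake) and the buffered wake channel are illustrative choices for the sketch, not part of the cothority API.

package main

import (
    "fmt"
    "sync"
    "time"
)

// queue is an illustrative stand-in for the dispatch queue: a mutex-protected
// slice plus a buffered wake-up channel of size 1.
type queue struct {
    mu    sync.Mutex
    items []string
    wake  chan bool
}

// push appends an item and wakes the reader only if it might be sleeping,
// mirroring the len(...) == 1 && len(wait) == 0 check in ProcessProtocolMsg.
func (q *queue) push(item string) {
    q.mu.Lock()
    q.items = append(q.items, item)
    if len(q.items) == 1 && len(q.wake) == 0 {
        q.wake <- true
    }
    q.mu.Unlock()
}

// reader drains the queue and blocks on the wake channel when it is empty,
// mirroring dispatchMsgReader in Example 1.
func (q *queue) reader() {
    for {
        q.mu.Lock()
        if len(q.items) > 0 {
            item := q.items[0]
            q.items = q.items[1:]
            q.mu.Unlock()
            fmt.Println("dispatched:", item)
        } else {
            q.mu.Unlock()
            <-q.wake
        }
    }
}

func main() {
    q := &queue{wake: make(chan bool, 1)}
    go q.reader()
    q.push("msg-1")
    q.push("msg-2")
    time.Sleep(100 * time.Millisecond) // give the reader time to print
}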
Example 5: closeConnections
// closeConnections only shuts down the network connections - used mainly
// for testing.
func (h *Host) closeConnections() error {
    h.networkLock.Lock()
    defer h.networkLock.Unlock()
    for _, c := range h.connections {
        log.Lvl4(h.ServerIdentity.First(), "Closing connection", c, c.Remote(), c.Local())
        err := c.Close()
        if err != nil {
            log.Error(h.ServerIdentity.First(), "Couldn't close connection", c)
            return err
        }
    }
    log.Lvl4(h.ServerIdentity.First(), "Closing tcpHost")
    h.connections = make(map[network.ServerIdentityID]network.SecureConn)
    return h.host.Close()
}
Example 6: Dispatch
// Dispatch can handle timeouts
func (p *Propagate) Dispatch() error {
    process := true
    log.Lvl4(p.ServerIdentity())
    for process {
        p.Lock()
        timeout := time.Millisecond * time.Duration(p.sd.Msec)
        p.Unlock()
        select {
        case msg := <-p.ChannelSD:
            log.Lvl3(p.ServerIdentity(), "Got data from", msg.ServerIdentity)
            if p.onData != nil {
                _, netMsg, err := network.UnmarshalRegistered(msg.Data)
                if err == nil {
                    p.onData(netMsg)
                }
            }
            if !p.IsRoot() {
                log.Lvl3(p.ServerIdentity(), "Sending to parent")
                p.SendToParent(&PropagateReply{})
            }
            if p.IsLeaf() {
                process = false
            } else {
                log.Lvl3(p.ServerIdentity(), "Sending to children")
                p.SendToChildren(&msg.PropagateSendData)
            }
        case <-p.ChannelReply:
            p.received++
            log.Lvl4(p.ServerIdentity(), "received:", p.received, p.subtree)
            if !p.IsRoot() {
                p.SendToParent(&PropagateReply{})
            }
            if p.received == p.subtree {
                process = false
            }
        case <-time.After(timeout):
            log.Fatal("Timeout")
            process = false
        }
    }
    if p.IsRoot() {
        if p.onDoneCb != nil {
            p.onDoneCb(p.received + 1)
        }
    }
    p.Done()
    return nil
}
Example 7: aggregate
// aggregate stores the message for a protocol instance so that the protocol
// instance gets all of its children's messages at once.
// n is the node the host is representing in this tree, and sdaMsg is the
// message being analyzed.
func (n *TreeNodeInstance) aggregate(sdaMsg *ProtocolMsg) (network.MessageTypeID, []*ProtocolMsg, bool) {
    mt := sdaMsg.MsgType
    fromParent := !n.IsRoot() && sdaMsg.From.TreeNodeID.Equal(n.Parent().ID)
    if fromParent || !n.HasFlag(mt, AggregateMessages) {
        return mt, []*ProtocolMsg{sdaMsg}, true
    }
    // store the msg according to its type
    if _, ok := n.msgQueue[mt]; !ok {
        n.msgQueue[mt] = make([]*ProtocolMsg, 0)
    }
    msgs := append(n.msgQueue[mt], sdaMsg)
    n.msgQueue[mt] = msgs
    log.Lvl4(n.ServerIdentity().Addresses, "received", len(msgs), "of", len(n.Children()), "messages")
    // if all children messages have arrived, deliver them and clear the queue
    if len(msgs) == len(n.Children()) {
        delete(n.msgQueue, mt)
        return mt, msgs, true
    }
    // not all children have answered yet - keep waiting
    return mt, nil, false
}
Example 8: handleResponsePrepare
func (bft *ProtocolBFTCoSi) handleResponsePrepare(r *Response) error {
    // check if we have enough
    bft.tprMut.Lock()
    defer bft.tprMut.Unlock()
    bft.tempPrepareResponse = append(bft.tempPrepareResponse, r.Response)
    if len(bft.tempPrepareResponse) < len(bft.Children()) {
        return nil
    }
    // wait for verification
    bzrReturn, ok := bft.waitResponseVerification()
    if ok {
        // append response
        resp, err := bft.prepare.Response(bft.tempPrepareResponse)
        if err != nil {
            return err
        }
        bzrReturn.Response = resp
    }
    log.Lvl4("BFTCoSi Handle Response PREPARE")
    if bft.IsRoot() {
        // Notify 'commit'-round as we're root
        if err := bft.startChallengeCommit(); err != nil {
            log.Error(err)
        }
        return nil
    }
    return bft.SendTo(bft.Parent(), bzrReturn)
}
Example 9: Start
// Start will execute one cothority-binary for each server
// configured
func (d *Localhost) Start(args ...string) error {
    if err := os.Chdir(d.runDir); err != nil {
        return err
    }
    log.Lvl4("Localhost: chdir into", d.runDir)
    ex := d.runDir + "/" + d.Simulation
    d.running = true
    log.Lvl1("Starting", d.servers, "applications of", ex)
    for index := 0; index < d.servers; index++ {
        d.wgRun.Add(1)
        log.Lvl3("Starting", index)
        host := "localhost" + strconv.Itoa(index)
        cmdArgs := []string{"-address", host, "-monitor",
            "localhost:" + strconv.Itoa(d.monitorPort),
            "-simul", d.Simulation,
            "-debug", strconv.Itoa(log.DebugVisible()),
        }
        cmdArgs = append(args, cmdArgs...)
        log.Lvl3("CmdArgs are", cmdArgs)
        cmd := exec.Command(ex, cmdArgs...)
        cmd.Stdout = os.Stdout
        cmd.Stderr = os.Stderr
        go func(i int, h string) {
            log.Lvl3("Localhost: will start host", h)
            err := cmd.Run()
            if err != nil {
                log.Error("Error running localhost", h, ":", err)
                d.errChan <- err
            }
            d.wgRun.Done()
            log.Lvl3("host (index", i, ")", h, "done")
        }(index, host)
    }
    return nil
}
Example 10: Equal
// Equal verifies whether the given tree is equal to this one
func (t *Tree) Equal(t2 *Tree) bool {
    if t.ID != t2.ID || t.Roster.ID != t2.Roster.ID {
        log.Lvl4("Ids of trees don't match")
        return false
    }
    return t.Root.Equal(t2.Root)
}
Example 11: handleChallengeCommit
// handleChallengeCommit verifies the signature and checks that no more than
// 1/3 of the participants refused to sign.
func (bft *ProtocolBFTCoSi) handleChallengeCommit(ch *ChallengeCommit) error {
    ch.Challenge = bft.commit.Challenge(ch.Challenge)
    hash := bft.Suite().Hash()
    hash.Write(bft.Msg)
    h := hash.Sum(nil)
    // verify if the signature is correct
    if err := cosi.VerifyCosiSignatureWithException(bft.suite,
        bft.AggregatedPublic, h, ch.Signature,
        ch.Exceptions); err != nil {
        log.Error(bft.Name(), "Verification of the signature failed:", err)
        bft.signRefusal = true
    }
    // Check if we have no more than 1/3 failed nodes
    if len(ch.Exceptions) > int(bft.threshold) {
        log.Errorf("More than 1/3 (%d/%d) refused to sign ! ABORT",
            len(ch.Exceptions), len(bft.Roster().List))
        bft.signRefusal = true
    }
    // store the exceptions for later usage
    bft.tempExceptions = ch.Exceptions
    log.Lvl4("BFTCoSi handle Challenge COMMIT")
    if bft.IsLeaf() {
        return bft.startResponseCommit()
    }
    if err := bft.SendToChildrenInParallel(ch); err != nil {
        log.Error(err)
    }
    return nil
}
Example 12: CreateRoster
// CreateRoster creates a Roster with the host-names in 'addresses'.
// It creates 's.Hosts' entries, starting from 'port' for each round through
// 'addresses'.
func (s *SimulationBFTree) CreateRoster(sc *SimulationConfig, addresses []string, port int) {
    start := time.Now()
    nbrAddr := len(addresses)
    if sc.PrivateKeys == nil {
        sc.PrivateKeys = make(map[string]abstract.Scalar)
    }
    hosts := s.Hosts
    if s.SingleHost {
        // If we want to work with a single host, we only make one
        // host per server
        log.Fatal("Not supported yet")
        hosts = nbrAddr
        if hosts > s.Hosts {
            hosts = s.Hosts
        }
    }
    localhosts := false
    listeners := make([]net.Listener, hosts)
    if strings.Contains(addresses[0], "localhost") {
        localhosts = true
    }
    entities := make([]*network.ServerIdentity, hosts)
    log.Lvl3("Doing", hosts, "hosts")
    key := config.NewKeyPair(network.Suite)
    for c := 0; c < hosts; c++ {
        key.Secret.Add(key.Secret,
            key.Suite.Scalar().One())
        key.Public.Add(key.Public,
            key.Suite.Point().Base())
        address := addresses[c%nbrAddr] + ":"
        if localhosts {
            // If we have localhosts, we have to search for an empty port
            var err error
            listeners[c], err = net.Listen("tcp", ":0")
            if err != nil {
                log.Fatal("Couldn't search for empty port:", err)
            }
            _, p, _ := net.SplitHostPort(listeners[c].Addr().String())
            address += p
            log.Lvl4("Found free port", address)
        } else {
            address += strconv.Itoa(port + c/nbrAddr)
        }
        entities[c] = network.NewServerIdentity(key.Public, address)
        sc.PrivateKeys[entities[c].Addresses[0]] = key.Secret
    }
    // And close all our listeners
    if localhosts {
        for _, l := range listeners {
            err := l.Close()
            if err != nil {
                log.Fatal("Couldn't close port:", l, err)
            }
        }
    }
    sc.Roster = NewRoster(entities)
    log.Lvl3("Creating entity List took: " + time.Now().Sub(start).String())
}
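The localhost branch of CreateRoster relies on a standard trick for finding a free port: bind to ":0" and ask the resulting listener which port the kernel assigned. Below is a minimal standalone sketch using only the standard library; the helper name freePort is hypothetical, and note that CreateRoster keeps every listener open until all ports have been collected so the same port is not handed out twice, whereas this sketch closes the listener right away.

package main

import (
    "fmt"
    "net"
)

// freePort asks the kernel for an unused TCP port by listening on ":0",
// reads the assigned port back from the listener's address, then closes it.
func freePort() (string, error) {
    l, err := net.Listen("tcp", ":0")
    if err != nil {
        return "", err
    }
    defer l.Close()
    _, port, err := net.SplitHostPort(l.Addr().String())
    if err != nil {
        return "", err
    }
    return port, nil
}

func main() {
    p, err := freePort()
    if err != nil {
        fmt.Println("couldn't find a free port:", err)
        return
    }
    fmt.Println("free port:", p)
}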
Example 13: Close
// Close calls all nodes, deletes them from the list and closes them
func (o *Overlay) Close() {
    o.instancesLock.Lock()
    defer o.instancesLock.Unlock()
    for _, tni := range o.instances {
        log.Lvl4(o.host.workingAddress, "Closing TNI", tni.TokenID())
        o.nodeDelete(tni.Token())
    }
}
Example 14: nodeDone
// nodeDone is called either at the end of EndProtocol or at the end of the
// response phase of the commit round.
func (bft *ProtocolBFTCoSi) nodeDone() bool {
    log.Lvl4(bft.Name(), "nodeDone()")
    bft.doneProcessing <- true
    if bft.onDoneCallback != nil {
        // only true for the root
        bft.onDoneCallback()
    }
    return true
}
Example 15: startAnnouncementCommit
// startAnnouncementCommit creates the announcement for the commit phase and
// sends it down the tree.
func (bft *ProtocolBFTCoSi) startAnnouncementCommit() error {
    ann := bft.commit.CreateAnnouncement()
    a := &Announce{
        TYPE:         RoundCommit,
        Announcement: ann,
    }
    log.Lvl4(bft.Name(), "BFTCoSi Start Announcement (COMMIT)")
    return bft.sendAnnouncement(a)
}