This article collects typical usage examples of the Golang function V from github.com/ethereum/go-ethereum/logger/glog. If you are wondering what the V function does and how to use it in practice, the curated examples below should help.
The following shows 15 code examples of the V function, sorted by popularity by default.
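Before diving in, here is a minimal, self-contained sketch of the basic call pattern. The SetToStderr/SetV setup helpers are assumptions about the go-ethereum glog fork's configuration API; the verbosity constants (logger.Error, logger.Info, logger.Debug, logger.Detail) are the ones used throughout the examples below.

package main

import (
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
)

func main() {
	// Assumed configuration helpers: log to stderr, verbosity up to Info.
	glog.SetToStderr(true)
	glog.SetV(logger.Info)

	// Printed: Info is within the configured verbosity.
	glog.V(logger.Info).Infoln("node starting")

	// Suppressed: Detail is above the configured verbosity.
	glog.V(logger.Detail).Infof("connected peers: %d", 0)
}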
Example 1: getFromPeer
func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) (err error) {
d.activePeer = p.id
defer func() {
// reset on error
if err != nil {
d.queue.reset()
}
}()
glog.V(logger.Detail).Infoln("Synchronising with the network using:", p.id)
	// Start the fetcher. This will block the update entirely;
	// interrupts need to be sent to the appropriate channels.
if err = d.startFetchingHashes(p, hash, ignoreInitial); err != nil {
return err
}
	// Start fetching blocks in parallel. The strategy is simple: take any
	// available peers, reserve a chunk for each available peer, let the
	// peer deliver the chunk, and periodically check if a peer has timed
	// out.
if err = d.startFetchingBlocks(p); err != nil {
return err
}
glog.V(logger.Detail).Infoln("Sync completed")
return nil
}
Example 2: SetUrlHint
func (self *Registrar) SetUrlHint(urlhint string, addr common.Address) (txhash string, err error) {
if urlhint != "" {
UrlHintAddr = urlhint
} else {
if !zero.MatchString(UrlHintAddr) {
return
}
nameHex, extra := encodeName(UrlHintName, 2)
urlHintAbi := resolveAbi + nameHex + extra
glog.V(logger.Detail).Infof("UrlHint address query data: %s to %s", urlHintAbi, GlobalRegistrarAddr)
var res string
res, _, err = self.backend.Call("", GlobalRegistrarAddr, "", "", "", urlHintAbi)
if len(res) >= 40 {
			UrlHintAddr = "0x" + res[len(res)-40:]
}
if err != nil || zero.MatchString(UrlHintAddr) {
if (addr == common.Address{}) {
err = fmt.Errorf("UrlHint address not found and sender for creation not given")
return
}
txhash, err = self.backend.Transact(addr.Hex(), "", "", "", "210000", "", UrlHintCode)
if err != nil {
err = fmt.Errorf("UrlHint address not found and sender for creation failed: %v", err)
}
glog.V(logger.Detail).Infof("created UrlHint @ txhash %v\n", txhash)
} else {
glog.V(logger.Detail).Infof("UrlHint found @ %v\n", HashRegAddr)
return
}
}
return
}
Example 3: resetState
func (pool *TxPool) resetState() {
currentState, err := pool.currentState()
if err != nil {
glog.V(logger.Info).Infoln("failed to get current state: %v", err)
return
}
	managedState := state.ManageState(currentState)
	pool.pendingState = managedState
// validate the pool of pending transactions, this will remove
// any transactions that have been included in the block or
// have been invalidated because of another transaction (e.g.
// higher gas price)
pool.validatePool()
	// Loop over the pending transactions and set the nonces for the new
	// pending transaction set.
for _, tx := range pool.pending {
if addr, err := tx.From(); err == nil {
// Set the nonce. Transaction nonce can never be lower
// than the state nonce; validatePool took care of that.
if pool.pendingState.GetNonce(addr) <= tx.Nonce() {
pool.pendingState.SetNonce(addr, tx.Nonce()+1)
}
}
}
// Check the queue and move transactions over to the pending if possible
// or remove those that have become invalid
pool.checkQueue()
}
Example 4: parseNodes
// parseNodes parses a list of discovery node URLs loaded from a .json file.
func (cfg *Config) parseNodes(file string) []*discover.Node {
// Short circuit if no node config is present
path := filepath.Join(cfg.DataDir, file)
if _, err := os.Stat(path); err != nil {
return nil
}
// Load the nodes from the config file
blob, err := ioutil.ReadFile(path)
if err != nil {
glog.V(logger.Error).Infof("Failed to access nodes: %v", err)
return nil
}
nodelist := []string{}
if err := json.Unmarshal(blob, &nodelist); err != nil {
glog.V(logger.Error).Infof("Failed to load nodes: %v", err)
return nil
}
// Interpret the list as a discovery node array
var nodes []*discover.Node
for _, url := range nodelist {
if url == "" {
continue
}
node, err := discover.ParseNode(url)
if err != nil {
glog.V(logger.Error).Infof("Node URL %s: %v\n", url, err)
continue
}
nodes = append(nodes, node)
}
return nodes
}
Example 5: add
// add inserts a new envelope into the message pool to be distributed within the
// whisper network. It also inserts the envelope into the expiration pool at the
// appropriate time-stamp.
func (self *Whisper) add(envelope *Envelope) error {
self.poolMu.Lock()
defer self.poolMu.Unlock()
// short circuit when a received envelope has already expired
if envelope.Expiry < uint32(time.Now().Unix()) {
return nil
}
// Insert the message into the tracked pool
hash := envelope.Hash()
if _, ok := self.messages[hash]; ok {
glog.V(logger.Detail).Infof("whisper envelope already cached: %x\n", envelope)
return nil
}
self.messages[hash] = envelope
// Insert the message into the expiration pool for later removal
if self.expirations[envelope.Expiry] == nil {
self.expirations[envelope.Expiry] = set.NewNonTS()
}
if !self.expirations[envelope.Expiry].Has(hash) {
self.expirations[envelope.Expiry].Add(hash)
// Notify the local node of a message arrival
go self.postEvent(envelope)
}
glog.V(logger.Detail).Infof("cached whisper envelope %x\n", envelope)
return nil
}
Example 6: enqueue
// enqueue schedules a new future import operation, if the block to be imported
// has not yet been seen.
func (f *Fetcher) enqueue(peer string, block *types.Block) {
hash := block.Hash()
// Ensure the peer isn't DOSing us
count := f.queues[peer] + 1
if count > blockLimit {
glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit)
return
}
// Discard any past or too distant blocks
if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist)
return
}
// Schedule the block for future importing
if _, ok := f.queued[hash]; !ok {
op := &inject{
origin: peer,
block: block,
}
f.queues[peer] = count
f.queued[hash] = op
f.queue.Push(op, -float32(block.NumberU64()))
if glog.V(logger.Debug) {
glog.Infof("Peer %s: queued block #%d [%x], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
}
}
}
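Note the explicit if glog.V(logger.Debug) guard on the final log call above: glog.V returns a boolean-like value, so the guard prevents the log arguments from being evaluated at all when Debug output is disabled. A small sketch of the idiom, with a hypothetical expensiveSummary helper standing in for a costly argument:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
)

// expensiveSummary is a hypothetical stand-in for a costly log argument.
func expensiveSummary(count int) string {
	return fmt.Sprintf("%d queued blocks", count)
}

func main() {
	glog.SetToStderr(true)
	// With verbosity below Debug, the guard skips expensiveSummary entirely;
	// an unguarded glog.V(...).Infof would still evaluate its arguments.
	if glog.V(logger.Debug) {
		glog.Infof("queue state: %s", expensiveSummary(3))
	}
}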
Example 7: listenLoop
// listenLoop runs in its own goroutine and accepts
// inbound connections.
func (srv *Server) listenLoop() {
defer srv.loopWG.Done()
glog.V(logger.Info).Infoln("Listening on", srv.listener.Addr())
// This channel acts as a semaphore limiting
// active inbound connections that are lingering pre-handshake.
// If all slots are taken, no further connections are accepted.
tokens := maxAcceptConns
if srv.MaxPendingPeers > 0 {
tokens = srv.MaxPendingPeers
}
slots := make(chan struct{}, tokens)
for i := 0; i < tokens; i++ {
slots <- struct{}{}
}
for {
<-slots
fd, err := srv.listener.Accept()
if err != nil {
return
}
glog.V(logger.Debug).Infof("Accepted conn %v\n", fd.RemoteAddr())
go func() {
srv.setupConn(fd, inboundConn, nil)
slots <- struct{}{}
}()
}
}
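The slots channel above is the standard Go counting-semaphore idiom: fill a buffered channel with N tokens, take one before starting work, and return it when done. Stripped of the networking, the pattern looks like this (a generic sketch, not geth code):

package main

import (
	"fmt"
	"sync"
)

func main() {
	const tokens = 3 // at most three tasks in flight at once
	slots := make(chan struct{}, tokens)
	for i := 0; i < tokens; i++ {
		slots <- struct{}{}
	}

	var wg sync.WaitGroup
	for task := 0; task < 10; task++ {
		<-slots // blocks until a slot frees up
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			fmt.Println("handling task", id)
			slots <- struct{}{} // return the token
		}(task)
	}
	wg.Wait()
}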
Example 8: unlockAccount
// tries unlocking the specified account a few times.
func unlockAccount(ctx *cli.Context, accman *accounts.Manager, address string, i int, passwords []string) (accounts.Account, string) {
account, err := utils.MakeAddress(accman, address)
if err != nil {
utils.Fatalf("Could not list accounts: %v", err)
}
for trials := 0; trials < 3; trials++ {
prompt := fmt.Sprintf("Unlocking account %s | Attempt %d/%d", address, trials+1, 3)
password := getPassPhrase(prompt, false, i, passwords)
err = accman.Unlock(account, password)
if err == nil {
glog.V(logger.Info).Infof("Unlocked account %x", account.Address)
return account, password
}
if err, ok := err.(*accounts.AmbiguousAddrError); ok {
glog.V(logger.Info).Infof("Unlocked account %x", account.Address)
return ambiguousAddrRecovery(accman, err, password), password
}
if err != accounts.ErrDecrypt {
// No need to prompt again if the error is not decryption-related.
break
}
}
// All trials expended to unlock account, bail out
utils.Fatalf("Failed to unlock account %s (%v)", address, err)
return accounts.Account{}, ""
}
Example 9: listenLoop
// main loop for adding connections via listening
func (srv *Server) listenLoop() {
defer srv.loopWG.Done()
// This channel acts as a semaphore limiting
// active inbound connections that are lingering pre-handshake.
// If all slots are taken, no further connections are accepted.
slots := make(chan struct{}, maxAcceptConns)
for i := 0; i < maxAcceptConns; i++ {
slots <- struct{}{}
}
glog.V(logger.Info).Infoln("Listening on", srv.listener.Addr())
for {
<-slots
conn, err := srv.listener.Accept()
if err != nil {
return
}
glog.V(logger.Debug).Infof("Accepted conn %v\n", conn.RemoteAddr())
srv.peerWG.Add(1)
go func() {
srv.startPeer(conn, nil)
slots <- struct{}{}
}()
}
}
Example 10: run
func (p *Peer) run() DiscReason {
readErr := make(chan error, 1)
p.wg.Add(2)
go p.readLoop(readErr)
go p.pingLoop()
p.startProtocols()
// Wait for an error or disconnect.
var reason DiscReason
select {
case err := <-readErr:
if r, ok := err.(DiscReason); ok {
reason = r
} else {
// Note: We rely on protocols to abort if there is a write
// error. It might be more robust to handle them here as well.
glog.V(logger.Detail).Infof("%v: Read error: %v\n", p, err)
reason = DiscNetworkError
}
case err := <-p.protoErr:
reason = discReasonForError(err)
case reason = <-p.disc:
}
close(p.closed)
p.politeDisconnect(reason)
p.wg.Wait()
glog.V(logger.Debug).Infof("%v: Disconnected: %v\n", p, reason)
return reason
}
Example 11: Map
// Map adds a port mapping on m and keeps it alive until c is closed.
// This function is typically invoked in its own goroutine.
func Map(m Interface, c chan struct{}, protocol string, extport, intport int, name string) {
refresh := time.NewTimer(mapUpdateInterval)
defer func() {
refresh.Stop()
glog.V(logger.Debug).Infof("deleting port mapping: %s %d -> %d (%s) using %s\n", protocol, extport, intport, name, m)
m.DeleteMapping(protocol, extport, intport)
}()
if err := m.AddMapping(protocol, intport, extport, name, mapTimeout); err != nil {
glog.V(logger.Debug).Infof("network port %s:%d could not be mapped: %v\n", protocol, intport, err)
} else {
glog.V(logger.Info).Infof("mapped network port %s:%d -> %d (%s) using %s\n", protocol, extport, intport, name, m)
}
for {
select {
case _, ok := <-c:
if !ok {
return
}
case <-refresh.C:
glog.V(logger.Detail).Infof("refresh port mapping %s:%d -> %d (%s) using %s\n", protocol, extport, intport, name, m)
if err := m.AddMapping(protocol, intport, extport, name, mapTimeout); err != nil {
glog.V(logger.Debug).Infof("network port %s:%d could not be mapped: %v\n", protocol, intport, err)
}
refresh.Reset(mapUpdateInterval)
}
}
}
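A typical caller runs Map in its own goroutine and closes the channel to tear the mapping down. A hedged usage sketch, assuming this function lives in go-ethereum's p2p/nat package and that nat.Any() auto-selects a port-mapping mechanism:

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/p2p/nat"
)

func main() {
	quit := make(chan struct{})
	// Keep TCP port 30303 mapped on the gateway until quit is closed.
	go nat.Map(nat.Any(), quit, "tcp", 30303, 30303, "ethereum p2p")

	time.Sleep(10 * time.Second) // illustrative lifetime of the mapping
	close(quit)                  // triggers the deferred DeleteMapping
	time.Sleep(time.Second)      // give the cleanup goroutine a moment before exit
}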
Example 12: startIpc
func startIpc(cfg IpcConfig, codec codec.Codec, api shared.EthereumApi) error {
os.Remove(cfg.Endpoint) // in case it still exists from a previous run
l, err := net.ListenUnix("unix", &net.UnixAddr{Name: cfg.Endpoint, Net: "unix"})
if err != nil {
return err
}
os.Chmod(cfg.Endpoint, 0600)
go func() {
for {
conn, err := l.AcceptUnix()
if err != nil {
glog.V(logger.Error).Infof("Error accepting ipc connection - %v\n", err)
continue
}
go handle(conn, api, codec)
}
}()
glog.V(logger.Info).Infof("IPC service started (%s)\n", cfg.Endpoint)
return nil
}
Example 13: synchronise
func (pm *ProtocolManager) synchronise(peer *peer) {
// Make sure the peer's TD is higher than our own. If not drop.
if peer.td.Cmp(pm.chainman.Td()) <= 0 {
return
}
	// Check if the downloader is busy so we don't show the sync message
	// for every attempt.
if pm.downloader.IsBusy() {
return
}
// FIXME if we have the hash in our chain and the TD of the peer is
// much higher than ours, something is wrong with us or the peer.
// Check if the hash is on our own chain
if pm.chainman.HasBlock(peer.recentHash) {
return
}
// Get the hashes from the peer (synchronously)
err := pm.downloader.Synchronise(peer.id, peer.recentHash)
if err != nil && err == downloader.ErrBadPeer {
glog.V(logger.Debug).Infoln("removed peer from peer set due to bad action")
pm.removePeer(peer)
} else if err != nil {
// handle error
glog.V(logger.Detail).Infoln("error downloading:", err)
}
}
Example 14: handle
// handle is the callback invoked to manage the life cycle of an eth peer. When
// this function terminates, the peer is disconnected.
func (pm *ProtocolManager) handle(p *peer) error {
glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name())
// Execute the Ethereum handshake
td, head, genesis := pm.chainman.Status()
if err := p.Handshake(td, head, genesis); err != nil {
glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err)
return err
}
// Register the peer locally
glog.V(logger.Detail).Infof("%v: adding peer", p)
if err := pm.peers.Register(p); err != nil {
glog.V(logger.Error).Infof("%v: addition failed: %v", p, err)
return err
}
defer pm.removePeer(p.id)
// Register the peer in the downloader. If the downloader considers it banned, we disconnect
if err := pm.downloader.RegisterPeer(p.id, p.version, p.Head(), p.RequestHashes, p.RequestHashesFromNumber, p.RequestBlocks); err != nil {
return err
}
	// Propagate existing transactions. New transactions appearing
	// after this will be sent via broadcasts.
pm.syncTransactions(p)
	// Main loop: handle incoming messages.
for {
if err := pm.handleMsg(p); err != nil {
glog.V(logger.Debug).Infof("%v: message handling failed: %v", p, err)
return err
}
}
}
Example 15: startPeer
func (srv *Server) startPeer(fd net.Conn, dest *discover.Node) {
// TODO: handle/store session token
// Run setupFunc, which should create an authenticated connection
// and run the capability exchange. Note that any early error
// returns during that exchange need to call peerWG.Done because
// the callers of startPeer added the peer to the wait group already.
fd.SetDeadline(time.Now().Add(handshakeTimeout))
conn, err := srv.setupFunc(fd, srv.PrivateKey, srv.ourHandshake, dest, srv.keepconn)
if err != nil {
fd.Close()
glog.V(logger.Debug).Infof("Handshake with %v failed: %v", fd.RemoteAddr(), err)
srv.peerWG.Done()
return
}
conn.MsgReadWriter = &netWrapper{
wrapped: conn.MsgReadWriter,
conn: fd, rtimeout: frameReadTimeout, wtimeout: frameWriteTimeout,
}
p := newPeer(fd, conn, srv.Protocols)
if ok, reason := srv.addPeer(conn, p); !ok {
glog.V(logger.Detail).Infof("Not adding %v (%v)\n", p, reason)
p.politeDisconnect(reason)
srv.peerWG.Done()
return
}
// The handshakes are done and it passed all checks.
// Spawn the Peer loops.
go srv.runPeer(p)
}