This page collects typical usage examples of the Golang function Infof from github.com/coreos/etcd/log. If you are unsure what Infof does, how to call it, or what real-world uses look like, the curated code examples below may help.
The following presents 15 code examples of the Infof function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
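Before the examples, here is a minimal sketch of the basic call pattern. It assumes the old coreos/etcd log package, whose Infof takes a printf-style format string plus variadic arguments, like fmt.Printf; the node name is a hypothetical placeholder, and the format string is borrowed from Example 2 below:

package main

import (
	"github.com/coreos/etcd/log"
)

func main() {
	name := "node0" // hypothetical node name, for illustration only

	// Infof formats its arguments printf-style and logs at INFO level.
	log.Infof("%v starting in peer mode", name)
}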
Example 1: monitorPeerActivity
// monitorPeerActivity has the leader periodically check for dead nodes and demote them.
func (s *PeerServer) monitorPeerActivity(closeChan chan bool) {
	for {
		select {
		case <-time.After(PeerActivityMonitorTimeout):
		case <-closeChan:
			return
		}

		// Ignore while this peer is not a leader.
		if s.raftServer.State() != raft.Leader {
			continue
		}

		// Check last activity for all peers.
		now := time.Now()
		promoteDelay := time.Duration(s.ClusterConfig().PromoteDelay) * time.Second
		peers := s.raftServer.Peers()
		for _, peer := range peers {
			// If the last response from the peer is longer than the promote delay
			// then automatically demote the peer.
			if !peer.LastActivity().IsZero() && now.Sub(peer.LastActivity()) > promoteDelay {
				log.Infof("%s: demoting node: %v; last activity %v ago", s.Config.Name, peer.Name, now.Sub(peer.LastActivity()))
				if _, err := s.raftServer.Do(&DemoteCommand{Name: peer.Name}); err != nil {
					log.Infof("%s: warning: autodemotion error: %v", s.Config.Name, err)
				}
				continue
			}
		}
	}
}
Example 2: runServer
func (e *Etcd) runServer() {
	var removeNotify <-chan bool

	for {
		if e.mode == PeerMode {
			log.Infof("%v starting in peer mode", e.Config.Name)
			// Starting the peer server should be followed closely by listening on its port.
			// If not, it may leave many requests unaccepted, or fail to receive heartbeats
			// from the cluster. One severe problem caused by missed heartbeats: when a
			// second node joins a one-node cluster, the cluster can stop working for as
			// long as the two nodes cannot exchange messages.
			e.PeerServer.Start(e.Config.Snapshot, e.Config.ClusterConfig())
			removeNotify = e.PeerServer.RemoveNotify()
		} else {
			log.Infof("%v starting in standby mode", e.Config.Name)
			e.StandbyServer.Start()
			removeNotify = e.StandbyServer.RemoveNotify()
		}

		// The etcd server is ready to accept connections; notify waiters.
		e.onceReady.Do(func() { close(e.readyNotify) })

		select {
		case <-e.closeChan:
			e.PeerServer.Stop()
			e.StandbyServer.Stop()
			return
		case <-removeNotify:
		}

		if e.mode == PeerMode {
			peerURLs := e.Registry.PeerURLs(e.PeerServer.RaftServer().Leader(), e.Config.Name)
			e.StandbyServer.SyncCluster(peerURLs)
			e.setMode(StandbyMode)
		} else {
			// Create etcd key-value store and registry.
			e.Store = store.New()
			e.Registry = server.NewRegistry(e.Store)
			e.PeerServer.SetStore(e.Store)
			e.PeerServer.SetRegistry(e.Registry)
			e.Server.SetStore(e.Store)
			e.Server.SetRegistry(e.Registry)

			// Generate a new peer server here.
			// TODO(yichengq): the raft server cannot be started after being stopped.
			// This should be removed when raft restart is implemented.
			heartbeatInterval := time.Duration(e.Config.Peer.HeartbeatInterval) * time.Millisecond
			electionTimeout := time.Duration(e.Config.Peer.ElectionTimeout) * time.Millisecond
			raftServer, err := raft.NewServer(e.Config.Name, e.Config.DataDir, e.PeerServer.RaftServer().Transporter(), e.Store, e.PeerServer, "")
			if err != nil {
				log.Fatal(err)
			}
			raftServer.SetElectionTimeout(electionTimeout)
			raftServer.SetHeartbeatInterval(heartbeatInterval)
			e.PeerServer.SetRaftServer(raftServer, e.Config.Snapshot)
			e.StandbyServer.SetRaftServer(raftServer)
			e.PeerServer.SetJoinIndex(e.StandbyServer.JoinIndex())
			e.setMode(PeerMode)
		}
	}
}
Example 3: monitorActiveSize
// monitorActiveSize has the leader periodically check the status of cluster
// nodes and swap them out for proxies as needed.
func (s *PeerServer) monitorActiveSize(closeChan chan bool) {
	for {
		select {
		case <-time.After(ActiveMonitorTimeout):
		case <-closeChan:
			return
		}

		// Ignore while this peer is not a leader.
		if s.raftServer.State() != raft.Leader {
			continue
		}

		// Retrieve target active size and actual active size.
		activeSize := s.ClusterConfig().ActiveSize
		peerCount := s.registry.PeerCount()
		proxies := s.registry.Proxies()
		peers := s.registry.Peers()
		if index := sort.SearchStrings(peers, s.Config.Name); index < len(peers) && peers[index] == s.Config.Name {
			peers = append(peers[:index], peers[index+1:]...)
		}

		// If we have more active nodes than we should then demote.
		if peerCount > activeSize {
			peer := peers[rand.Intn(len(peers))]
			log.Infof("%s: demoting: %v", s.Config.Name, peer)
			if _, err := s.raftServer.Do(&DemoteCommand{Name: peer}); err != nil {
				log.Infof("%s: warning: demotion error: %v", s.Config.Name, err)
			}
			continue
		}

		// If we don't have enough active nodes then try to promote a proxy.
		if peerCount < activeSize && len(proxies) > 0 {
		loop:
			for _, i := range rand.Perm(len(proxies)) {
				proxy := proxies[i]
				proxyPeerURL, _ := s.registry.ProxyPeerURL(proxy)
				log.Infof("%s: attempting to promote: %v (%s)", s.Config.Name, proxy, proxyPeerURL)

				// Notify proxy to promote itself.
				client := &http.Client{
					Transport: &http.Transport{
						DisableKeepAlives:     false,
						ResponseHeaderTimeout: ActiveMonitorTimeout,
					},
				}
				resp, err := client.Post(fmt.Sprintf("%s/promote", proxyPeerURL), "application/json", nil)
				if err != nil {
					log.Infof("%s: warning: promotion error: %v", s.Config.Name, err)
					continue
				} else if resp.StatusCode != http.StatusOK {
					log.Infof("%s: warning: promotion failure: %v", s.Config.Name, resp.StatusCode)
					continue
				}
				break loop
			}
		}
	}
}
Example 4: monitorPeerActivity
// monitorPeerActivity has the leader periodically check for dead nodes and remove them.
func (s *PeerServer) monitorPeerActivity() {
	for {
		// Stop the timer explicitly: deferring timer.Stop() inside the loop
		// would accumulate deferred calls until the function returns.
		timer := time.NewTimer(PeerActivityMonitorTimeout)
		select {
		case <-s.closeChan:
			timer.Stop()
			return
		case <-timer.C:
		}

		// Ignore while this peer is not a leader.
		if s.raftServer.State() != raft.Leader {
			continue
		}

		// Check last activity for all peers.
		now := time.Now()
		removeDelay := time.Duration(int64(s.ClusterConfig().RemoveDelay * float64(time.Second)))
		peers := s.raftServer.Peers()
		for _, peer := range peers {
			// If the time since the peer's last response exceeds the remove delay
			// then automatically remove the peer.
			if !peer.LastActivity().IsZero() && now.Sub(peer.LastActivity()) > removeDelay {
				log.Infof("%s: removing node: %v; last activity %v ago", s.Config.Name, peer.Name, now.Sub(peer.LastActivity()))
				if _, err := s.raftServer.Do(&RemoveCommand{Name: peer.Name}); err != nil {
					log.Infof("%s: warning: autodemotion error: %v", s.Config.Name, err)
				}
				continue
			}
		}
	}
}
Example 5: monitorActiveSize
// monitorActiveSize has the leader periodically check the status of cluster
// nodes and swap them out for standbys as needed.
func (s *PeerServer) monitorActiveSize() {
	for {
		// Stop the timer explicitly rather than deferring in the loop,
		// which would pile up deferred calls until the function returns.
		timer := time.NewTimer(ActiveMonitorTimeout)
		select {
		case <-s.closeChan:
			timer.Stop()
			return
		case <-timer.C:
		}

		// Ignore while this peer is not a leader.
		if s.raftServer.State() != raft.Leader {
			continue
		}

		// Retrieve target active size and actual active size.
		activeSize := s.ClusterConfig().ActiveSize
		peers := s.registry.Names()
		peerCount := len(peers)
		if index := sort.SearchStrings(peers, s.Config.Name); index < len(peers) && peers[index] == s.Config.Name {
			peers = append(peers[:index], peers[index+1:]...)
		}

		// If we have more active nodes than we should then remove.
		if peerCount > activeSize {
			peer := peers[rand.Intn(len(peers))]
			log.Infof("%s: removing: %v", s.Config.Name, peer)
			if _, err := s.raftServer.Do(&RemoveCommand{Name: peer}); err != nil {
				log.Infof("%s: warning: remove error: %v", s.Config.Name, err)
			}
			continue
		}
	}
}
Example 6: logSnapshot
// logSnapshot logs about the snapshot that was taken.
func (s *PeerServer) logSnapshot(err error, currentIndex, count uint64) {
	info := fmt.Sprintf("%s: snapshot of %d events at index %d", s.Config.Name, count, currentIndex)
	if err != nil {
		log.Infof("%s attempted and failed: %v", info, err)
	} else {
		log.Infof("%s completed", info)
	}
}
Example 7: Do
func (d *Discoverer) Do(discoveryURL string, name string, peer string) (peers []string, err error) {
	d.name = name
	d.peer = peer
	d.discoveryURL = discoveryURL

	u, err := url.Parse(discoveryURL)
	if err != nil {
		return
	}

	// prefix is prepended to all keys for this discovery.
	d.prefix = strings.TrimPrefix(u.Path, "/v2/keys/")

	// Keep the old path in case we need to set the KeyPrefix below.
	oldPath := u.Path
	u.Path = ""

	// Connect to a scheme://host, not a full URL with path.
	log.Infof("Discovery via %s using prefix %s.", u.String(), d.prefix)
	d.client = etcd.NewClient([]string{u.String()})

	if !strings.HasPrefix(oldPath, "/v2/keys") {
		d.client.SetKeyPrefix("")
	}

	// Register this machine first and announce that we are a member of
	// this cluster.
	err = d.heartbeat()
	if err != nil {
		return
	}

	// Start the very slow heartbeat to the cluster, in anticipation that
	// everything is going to go all right.
	go d.startHeartbeat()

	// Attempt to take the leadership role; if there is no error, we are it!
	resp, err := d.client.Create(path.Join(d.prefix, stateKey), startedState, 0)

	// Bail out on unexpected errors.
	if err != nil {
		if clientErr, ok := err.(*etcd.EtcdError); !ok || clientErr.ErrorCode != etcdErr.EcodeNodeExist {
			return nil, err
		}
	}

	// If we got a response then the CAS was successful; we are the leader.
	if resp != nil && resp.Node.Value == startedState {
		// We are the leader, we have no peers.
		log.Infof("Discovery _state was empty, so this machine is the initial leader.")
		return nil, nil
	}

	// Fall through to finding the other discovery peers.
	return d.findPeers()
}
Example 8: SyncCluster
func (s *StandbyServer) SyncCluster(peers []string) error {
	for i, url := range peers {
		peers[i] = s.fullPeerURL(url)
	}

	if err := s.syncCluster(peers); err != nil {
		log.Infof("fail syncing cluster(%v): %v", s.ClusterURLs(), err)
		return err
	}

	log.Infof("set cluster(%v) for standby server", s.ClusterURLs())
	return nil
}
Example 9: logHeartbeatTimeout
// logHeartbeatTimeout logs about the edge-triggered heartbeat timeout event
// only if we haven't warned within a reasonable interval.
func (s *PeerServer) logHeartbeatTimeout(peer *raft.Peer) {
	b, ok := s.logBackoffs[peer.Name]
	if !ok {
		b = &logBackoff{time.Time{}, time.Second, 1}
		s.logBackoffs[peer.Name] = b
	}

	if peer.LastActivity().After(b.next) {
		b.next = time.Time{}
		b.backoff = time.Second
		b.count = 1
	}

	if b.next.After(time.Now()) {
		b.count++
		return
	}

	b.backoff = 2 * b.backoff
	if b.backoff > MaxHeartbeatTimeoutBackoff {
		b.backoff = MaxHeartbeatTimeoutBackoff
	}
	b.next = time.Now().Add(b.backoff)
	log.Infof("%s: warning: heartbeat time out peer=%q missed=%d backoff=%q", s.Config.Name, peer.Name, b.count, b.backoff)
}
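The example above reads and writes fields of a per-peer logBackoff value. Judging from the literal &logBackoff{time.Time{}, time.Second, 1} and the field accesses, the type plausibly has the shape sketched below; this is a reconstruction for readability, not necessarily the verbatim etcd definition, and the package name is hypothetical:

package peer // hypothetical package name, for illustration only

import "time"

// logBackoff tracks warning state for one peer's heartbeat timeouts.
type logBackoff struct {
	next    time.Time     // earliest time the next warning may be logged
	backoff time.Duration // current backoff; doubled up to MaxHeartbeatTimeoutBackoff
	count   int           // timeouts observed since the last logged warning
}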
Example 10: urls
// Retrieves the URLs for all nodes using the given url lookup function.
func (r *Registry) urls(leaderName, selfName string, url func(name string) (string, bool)) []string {
	r.Lock()
	defer r.Unlock()

	// Build a list including the leader and self.
	urls := make([]string, 0)
	if url, _ := url(leaderName); len(url) > 0 {
		urls = append(urls, url)
	}

	// Retrieve a list of all nodes.
	if e, _ := r.store.Get(RegistryKey, false, false); e != nil {
		// Look up the URL for each one.
		for _, pair := range e.Node.Nodes {
			_, name := filepath.Split(pair.Key)
			if url, _ := url(name); len(url) > 0 && name != leaderName {
				urls = append(urls, url)
			}
		}
	}

	log.Infof("URLs: %s / %s (%s)", leaderName, selfName, strings.Join(urls, ","))
	return urls
}
Example 11: findPeers
func (d *Discoverer) findPeers() (peers []string, err error) {
	resp, err := d.client.Get(path.Join(d.prefix), false, true)
	if err != nil {
		return nil, err
	}

	node := resp.Node
	if node == nil {
		return nil, fmt.Errorf("%s key doesn't exist.", d.prefix)
	}

	for _, n := range node.Nodes {
		// Skip our own entry in the list; there is no point.
		if strings.HasSuffix(n.Key, "/"+d.name) {
			continue
		}
		peers = append(peers, n.Value)
	}

	if len(peers) == 0 {
		return nil, errors.New("Discovery found an initialized cluster but no reachable peers are registered.")
	}

	log.Infof("Discovery found peers %v", peers)
	return
}
Example 12: startTransport
// startTransport starts listening for and responding to raft commands.
func (s *PeerServer) startTransport(scheme string, tlsConf tls.Config) error {
	log.Infof("raft server [name %s, listen on %s, advertised url %s]", s.name, s.bindAddr, s.url)

	router := mux.NewRouter()
	s.httpServer = &http.Server{
		Handler:   router,
		TLSConfig: &tlsConf,
		Addr:      s.bindAddr,
	}

	// internal commands
	router.HandleFunc("/name", s.NameHttpHandler)
	router.HandleFunc("/version", s.VersionHttpHandler)
	router.HandleFunc("/version/{version:[0-9]+}/check", s.VersionCheckHttpHandler)
	router.HandleFunc("/upgrade", s.UpgradeHttpHandler)
	router.HandleFunc("/join", s.JoinHttpHandler)
	router.HandleFunc("/remove/{name:.+}", s.RemoveHttpHandler)
	router.HandleFunc("/vote", s.VoteHttpHandler)
	router.HandleFunc("/log", s.GetLogHttpHandler)
	router.HandleFunc("/log/append", s.AppendEntriesHttpHandler)
	router.HandleFunc("/snapshot", s.SnapshotHttpHandler)
	router.HandleFunc("/snapshotRecovery", s.SnapshotRecoveryHttpHandler)
	router.HandleFunc("/etcdURL", s.EtcdURLHttpHandler)

	if scheme == "http" {
		return s.listenAndServe()
	}
	return s.listenAndServeTLS(s.tlsInfo.CertFile, s.tlsInfo.KeyFile)
}
Example 13: monitorCluster
// monitorCluster assumes that the machine has tried to join the cluster and
// failed, so it waits for the interval at the beginning.
func (s *StandbyServer) monitorCluster() {
	for {
		// Stop the timer explicitly; deferring inside the loop would pile up
		// deferred calls until the function returns.
		timer := time.NewTimer(time.Duration(int64(s.SyncInterval * float64(time.Second))))
		select {
		case <-s.closeChan:
			timer.Stop()
			return
		case <-timer.C:
		}

		if err := s.syncCluster(nil); err != nil {
			log.Warnf("fail syncing cluster(%v): %v", s.ClusterURLs(), err)
			continue
		}

		leader := s.ClusterLeader()
		if leader == nil {
			log.Warnf("fail getting leader from cluster(%v)", s.ClusterURLs())
			continue
		}

		if err := s.join(leader.PeerURL); err != nil {
			log.Debugf("fail joining through leader %v: %v", leader, err)
			continue
		}

		log.Infof("join through leader %v", leader.PeerURL)
		go func() {
			s.Stop()
			close(s.removeNotify)
		}()
		return
	}
}
Example 14: SetNOCOWFile
// SetNOCOWFile sets the NOCOW flag for a file.
func SetNOCOWFile(path string) error {
	file, err := os.Open(path)
	if err != nil {
		return err
	}
	defer file.Close()

	fileinfo, err := file.Stat()
	if err != nil {
		return err
	}
	if fileinfo.IsDir() {
		return fmt.Errorf("skip directory")
	}
	if fileinfo.Size() != 0 {
		return fmt.Errorf("skip nonempty file")
	}

	var attr int
	if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), FS_IOC_GETFLAGS, uintptr(unsafe.Pointer(&attr))); errno != 0 {
		return errno
	}
	attr |= FS_NOCOW_FL
	if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), FS_IOC_SETFLAGS, uintptr(unsafe.Pointer(&attr))); errno != 0 {
		return errno
	}

	log.Infof("Set NOCOW to path %v succeeded", path)
	return nil
}
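The constants FS_IOC_GETFLAGS, FS_IOC_SETFLAGS, and FS_NOCOW_FL are defined elsewhere in the package. For reference, definitions matching the linux/amd64 ioctl request numbers from linux/fs.h would look roughly like the sketch below; the values and the package name are assumptions for illustration, so consult the actual package or kernel headers for the authoritative ones:

package fileutil // hypothetical package name, for illustration only

const (
	// Assumed ioctl request numbers for reading/writing inode flags (linux/amd64).
	FS_IOC_GETFLAGS uintptr = 0x80086601
	FS_IOC_SETFLAGS uintptr = 0x40086602

	// Assumed flag value: marks a file as not copy-on-write (honored by btrfs).
	FS_NOCOW_FL int = 0x00800000
)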
Example 15: parseFlags
// Parses non-configuration flags.
func parseFlags() {
	var versionFlag bool
	var cpuprofile string

	f := flag.NewFlagSet(os.Args[0], -1)
	f.SetOutput(ioutil.Discard)
	f.BoolVar(&versionFlag, "version", false, "print the version and exit")
	f.StringVar(&cpuprofile, "cpuprofile", "", "write cpu profile to file")
	f.Parse(os.Args[1:])

	// Print version if necessary.
	if versionFlag {
		fmt.Println(server.ReleaseVersion)
		os.Exit(0)
	}

	// Begin CPU profiling if specified.
	if cpuprofile != "" {
		f, err := os.Create(cpuprofile)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)

		c := make(chan os.Signal, 1)
		signal.Notify(c, os.Interrupt)
		go func() {
			sig := <-c
			log.Infof("captured %v, stopping profiler and exiting..", sig)
			pprof.StopCPUProfile()
			os.Exit(1)
		}()
	}
}