This article collects typical usage examples of the Golang BumpSum function from github.com/facebookgo/stats. If you are wondering what exactly BumpSum does, how to call it, and what it looks like in real code, the curated examples below should help.
A total of 10 code examples of the BumpSum function are shown, sorted by popularity by default.
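Before the individual examples, a minimal sketch of the calling pattern they all share may help. Everything in it is inferred from the calls shown below: stats.BumpSum(client, key, value) bumps a named counter, and stats.BumpTime(client, key) returns a timer whose End method records the elapsed time. Treating the client parameter as stats.Client, and assuming the package-level helpers are no-ops when that client is nil, are assumptions here; doWork and work are hypothetical names used only for illustration.

// A minimal sketch of the pattern used in the examples below. The
// stats.Client parameter type and the nil-safe behavior of the package-level
// helpers are assumptions; doWork and work are hypothetical.
package main

import (
	"errors"
	"fmt"

	"github.com/facebookgo/stats"
)

// work stands in for an arbitrary operation whose outcome we want to count.
func work() error { return errors.New("boom") }

func doWork(s stats.Client) error {
	t := stats.BumpTime(s, "work.time") // start a timer keyed "work.time"
	defer t.End()                       // record the elapsed time on return
	if err := work(); err != nil {
		stats.BumpSum(s, "work.error", 1) // add 1 to the "work.error" counter
		return err
	}
	stats.BumpSum(s, "work.success", 1) // add 1 to the "work.success" counter
	return nil
}

func main() {
	// If the nil-safety assumption holds, passing nil simply disables stats.
	if err := doWork(nil); err != nil {
		fmt.Println("work failed:", err)
	}
}

If that nil-safety assumption holds, it also explains why the examples below bump stats unconditionally instead of nil-checking p.Stats or s.stats first.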
Example 1: Acquire
// Acquire will pull a resource from the pool or create a new one if necessary.
func (p *Pool) Acquire() (io.Closer, error) {
	elapsedTime := stats.BumpTime(p.Stats, "acquire.time")
	defer elapsedTime.End()
	p.manageOnce.Do(p.goManage)
	r := make(chan io.Closer)
	p.acquire <- r
	c := <-r
	// sentinel value indicates the pool is closed
	if c == closedSentinel {
		return nil, errPoolClosed
	}
	// need to allocate a new resource
	if c == newSentinel {
		t := stats.BumpTime(p.Stats, "acquire.new.time")
		c, err := p.New()
		t.End()
		stats.BumpSum(p.Stats, "acquire.new", 1)
		if err != nil {
			stats.BumpSum(p.Stats, "acquire.error.new", 1)
			// discard our assumed checked out resource since we failed to New
			p.discard <- returnResource{resource: newSentinel}
		} else {
			p.new <- c
		}
		return c, err
	}
	// successfully acquired from pool
	return c, nil
}
Example 2: clientReadHeader
func (p *Proxy) clientReadHeader(c net.Conn, timeout time.Duration) (*messageHeader, error) {
	t := stats.BumpTime(p.stats, "client.read.header.time")
	type headerError struct {
		header *messageHeader
		error  error
	}
	resChan := make(chan headerError)
	c.SetReadDeadline(time.Now().Add(timeout))
	go func() {
		h, err := readHeader(c)
		resChan <- headerError{header: h, error: err}
	}()
	closed := false
	var response headerError
	select {
	case response = <-resChan:
		// all good
	case <-p.closed:
		closed = true
		c.SetReadDeadline(timeInPast)
		response = <-resChan
	}
	// Successfully read a header.
	if response.error == nil {
		t.End()
		return response.header, nil
	}
	// Client side disconnected.
	if response.error == io.EOF {
		stats.BumpSum(p.stats, "client.clean.disconnect", 1)
		return nil, errNormalClose
	}
	// We hit our ReadDeadline.
	if ne, ok := response.error.(net.Error); ok && ne.Timeout() {
		if closed {
			stats.BumpSum(p.stats, "client.clean.disconnect", 1)
			return nil, errNormalClose
		}
		return nil, errClientReadTimeout
	}
	// Some other unknown error.
	stats.BumpSum(p.stats, "client.error.disconnect", 1)
	p.Log.Error(response.error)
	return nil, response.error
}
Example 3: proxyMessage
// proxyMessage proxies a message, possibly its response, and possibly a
// follow-up call.
func (p *Proxy) proxyMessage(
	h *messageHeader,
	client net.Conn,
	server net.Conn,
	lastError *LastError,
) error {
	p.Log.Debugf("proxying message %s from %s for %s", h, client.RemoteAddr(), p)
	deadline := time.Now().Add(p.ReplicaSet.MessageTimeout)
	server.SetDeadline(deadline)
	client.SetDeadline(deadline)
	// OpQuery may need to be transformed and need special handling in order to
	// make the proxy transparent.
	if h.OpCode == OpQuery {
		stats.BumpSum(p.stats, "message.with.response", 1)
		return p.ReplicaSet.ProxyQuery.Proxy(h, client, server, lastError)
	}
	// Anything besides a getlasterror call (which requires an OpQuery) resets
	// the lastError.
	if lastError.Exists() {
		p.Log.Debug("reset getLastError cache")
		lastError.Reset()
	}
	// For other Ops we proxy the header & raw body over.
	if err := h.WriteTo(server); err != nil {
		p.Log.Error(err)
		return err
	}
	if _, err := io.CopyN(server, client, int64(h.MessageLength-headerLen)); err != nil {
		p.Log.Error(err)
		return err
	}
	// For Ops with responses we proxy the raw response message over.
	if h.OpCode.HasResponse() {
		stats.BumpSum(p.stats, "message.with.response", 1)
		if err := copyMessage(client, server); err != nil {
			p.Log.Error(err)
			return err
		}
	}
	return nil
}
Example 4: gleClientReadHeader
func (p *Proxy) gleClientReadHeader(c net.Conn) (*messageHeader, error) {
	h, err := p.clientReadHeader(c, p.ReplicaSet.GetLastErrorTimeout)
	if err == errClientReadTimeout {
		stats.BumpSum(p.stats, "client.gle.timeout", 1)
	}
	return h, err
}
Example 5: Stop
func (s *server) Stop() error {
	s.stopOnce.Do(func() {
		defer stats.BumpTime(s.stats, "stop.time").End()
		stats.BumpSum(s.stats, "stop", 1)
		// first disable keep-alive for new connections
		s.server.SetKeepAlivesEnabled(false)
		// then close the listener so new connections can't come through
		closeErr := s.listener.Close()
		<-s.serveDone
		// then trigger the background goroutine to stop and wait for it
		stopDone := make(chan struct{})
		s.stop <- stopDone
		// wait for stop
		select {
		case <-stopDone:
		case <-s.clock.After(s.stopTimeout):
			defer stats.BumpTime(s.stats, "kill.time").End()
			stats.BumpSum(s.stats, "kill", 1)
			// stop timed out, wait for kill
			killDone := make(chan struct{})
			s.kill <- killDone
			select {
			case <-killDone:
			case <-s.clock.After(s.killTimeout):
				// kill timed out, give up
				stats.BumpSum(s.stats, "kill.timeout", 1)
			}
		}
		if closeErr != nil && !isUseOfClosedError(closeErr) {
			stats.BumpSum(s.stats, "listener.close.error", 1)
			s.stopErr = closeErr
		}
	})
	return s.stopErr
}
Example 6: ListenAndServe
// ListenAndServe returns a Server for the given http.Server. It is equivalent
// to ListenAndServe from the standard library, but returns immediately.
// Requests will be accepted in a background goroutine. If the http.Server has
// a non-nil TLSConfig, a TLS-enabled listener will be set up.
func (h HTTP) ListenAndServe(s *http.Server) (Server, error) {
	addr := s.Addr
	if addr == "" {
		if s.TLSConfig == nil {
			addr = ":http"
		} else {
			addr = ":https"
		}
	}
	l, err := net.Listen("tcp", addr)
	if err != nil {
		stats.BumpSum(h.Stats, "listen.error", 1)
		return nil, err
	}
	if s.TLSConfig != nil {
		l = tls.NewListener(l, s.TLSConfig)
	}
	return h.Serve(s, l), nil
}
Example 7: manage
func (p *Pool) manage() {
	klock := p.Clock
	if klock == nil {
		klock = clock.New()
	}
	// setup goroutines to close resources
	closers := make(chan io.Closer)
	var closeWG sync.WaitGroup
	closeWG.Add(int(p.ClosePoolSize))
	for i := uint(0); i < p.ClosePoolSize; i++ {
		go func() {
			defer closeWG.Done()
			for c := range closers {
				t := stats.BumpTime(p.Stats, "close.time")
				stats.BumpSum(p.Stats, "close", 1)
				if err := c.Close(); err != nil {
					stats.BumpSum(p.Stats, "close.error", 1)
					p.CloseErrorHandler(err)
				}
				t.End()
			}
		}()
	}
	// setup a ticker to report various averages every minute. if we don't have a
	// Stats implementation provided, we Stop it so it never ticks.
	statsTicker := klock.Ticker(time.Minute)
	if p.Stats == nil {
		statsTicker.Stop()
	}
	resources := []entry{}
	outResources := map[io.Closer]struct{}{}
	out := uint(0)
	waiting := list.New()
	idleTicker := klock.Ticker(p.IdleTimeout)
	closed := false
	var closeResponse chan error
	for {
		if closed && out == 0 && waiting.Len() == 0 {
			if p.Stats != nil {
				statsTicker.Stop()
			}
			// all waiting acquires are done, all resources have been released.
			// now just wait for all resources to close.
			close(closers)
			closeWG.Wait()
			// close internal channels.
			close(p.acquire)
			close(p.new)
			close(p.release)
			close(p.discard)
			close(p.close)
			// return a response to the original close.
			closeResponse <- nil
			return
		}
		select {
		case r := <-p.acquire:
			// if closed, new acquire calls are rejected
			if closed {
				r <- closedSentinel
				stats.BumpSum(p.Stats, "acquire.error.closed", 1)
				continue
			}
			// acquire from pool
			if cl := len(resources); cl > 0 {
				c := resources[cl-1]
				outResources[c.resource] = struct{}{}
				r <- c.resource
				resources = resources[:cl-1]
				out++
				stats.BumpSum(p.Stats, "acquire.pool", 1)
				continue
			}
			// max resources already in use, need to block & wait
			if out == p.Max {
				waiting.PushBack(r)
				stats.BumpSum(p.Stats, "acquire.waiting", 1)
				continue
			}
			// Make a new resource in the calling goroutine by sending it a
			// newSentinel. We assume it's checked out. Acquire will discard if
			// creating a new resource fails.
			out++
			r <- newSentinel
		case c := <-p.new:
			outResources[c] = struct{}{}
		case rr := <-p.release:
			// ensure we're dealing with a resource acquired thru us
			if _, found := outResources[rr.resource]; !found {
				//......... the rest of this code has been omitted .........
Example 8: clientServeLoop
// clientServeLoop loops on a single client connected to the proxy and
// dispatches its requests.
func (p *Proxy) clientServeLoop(c net.Conn) {
	remoteIP := c.RemoteAddr().(*net.TCPAddr).IP.String()
	// enforce per-client max connection limit
	if p.maxPerClientConnections.inc(remoteIP) {
		c.Close()
		stats.BumpSum(p.stats, "client.rejected.max.connections", 1)
		p.Log.Errorf("rejecting client connection due to max connections limit: %s", remoteIP)
		return
	}
	// turn on TCP keep-alive and set it to the recommended period of 2 minutes
	// http://docs.mongodb.org/manual/faq/diagnostics/#faq-keepalive
	if conn, ok := c.(*net.TCPConn); ok {
		conn.SetKeepAlivePeriod(2 * time.Minute)
		conn.SetKeepAlive(true)
	}
	c = teeIf(fmt.Sprintf("client %s <=> %s", c.RemoteAddr(), p), c)
	p.Log.Infof("client %s connected to %s", c.RemoteAddr(), p)
	stats.BumpSum(p.stats, "client.connected", 1)
	p.ReplicaSet.ClientsConnected.Inc(1)
	defer func() {
		p.ReplicaSet.ClientsConnected.Dec(1)
		p.Log.Infof("client %s disconnected from %s", c.RemoteAddr(), p)
		p.wg.Done()
		if err := c.Close(); err != nil {
			p.Log.Error(err)
		}
		p.maxPerClientConnections.dec(remoteIP)
	}()
	var lastError LastError
	for {
		h, err := p.idleClientReadHeader(c)
		if err != nil {
			if err != errNormalClose {
				p.Log.Error(err)
			}
			return
		}
		mpt := stats.BumpTime(p.stats, "message.proxy.time")
		serverConn, err := p.getServerConn()
		if err != nil {
			if err != errNormalClose {
				p.Log.Error(err)
			}
			return
		}
		scht := stats.BumpTime(p.stats, "server.conn.held.time")
		for {
			err := p.proxyMessage(h, c, serverConn, &lastError)
			if err != nil {
				p.serverPool.Discard(serverConn)
				p.Log.Error(err)
				stats.BumpSum(p.stats, "message.proxy.error", 1)
				if ne, ok := err.(net.Error); ok && ne.Timeout() {
					stats.BumpSum(p.stats, "message.proxy.timeout", 1)
				}
				if err == errRSChanged {
					go p.ReplicaSet.Restart()
				}
				return
			}
			// One message was proxied, stop its timer.
			mpt.End()
			if !h.OpCode.IsMutation() {
				break
			}
			// If the operation we just performed was a mutation, we always make the
			// follow-up request on the same server because it's possibly a getLastErr
			// call which expects this behavior.
			stats.BumpSum(p.stats, "message.with.mutation", 1)
			h, err = p.gleClientReadHeader(c)
			if err != nil {
				// Client did not make _any_ query within the GetLastErrorTimeout.
				// Return the server to the pool and go back to the outer loop.
				if err == errClientReadTimeout {
					break
				}
				// Prevent noise of normal client disconnects, but log if anything else.
				if err != errNormalClose {
					p.Log.Error(err)
				}
				// We need to return our server to the pool (it's still good as far
				// as we know).
				p.serverPool.Release(serverConn)
				return
			}
			// Successfully read message when waiting for the getLastError call.
			mpt = stats.BumpTime(p.stats, "message.proxy.time")
			//......... the rest of this code has been omitted .........
Example 9: serve
func (s *server) serve() {
	stats.BumpSum(s.stats, "serve", 1)
	s.serveErr <- s.server.Serve(s.listener)
	close(s.serveDone)
	close(s.serveErr)
}
Example 10: manage
func (s *server) manage() {
	defer func() {
		close(s.new)
		close(s.active)
		close(s.idle)
		close(s.closed)
		close(s.stop)
		close(s.kill)
	}()
	var stopDone chan struct{}
	conns := map[net.Conn]http.ConnState{}
	var countNew, countActive, countIdle float64
	// decConn decrements the count associated with the current state of the
	// given connection.
	decConn := func(c net.Conn) {
		switch conns[c] {
		default:
			panic(fmt.Errorf("unknown existing connection: %s", c))
		case http.StateNew:
			countNew--
		case http.StateActive:
			countActive--
		case http.StateIdle:
			countIdle--
		}
	}
	// setup a ticker to report various values every minute. if we don't have a
	// Stats implementation provided, we Stop it so it never ticks.
	statsTicker := s.clock.Ticker(time.Minute)
	if s.stats == nil {
		statsTicker.Stop()
	}
	for {
		select {
		case <-statsTicker.C:
			// we'll only get here when s.stats is not nil
			s.stats.BumpAvg("http-state.new", countNew)
			s.stats.BumpAvg("http-state.active", countActive)
			s.stats.BumpAvg("http-state.idle", countIdle)
			s.stats.BumpAvg("http-state.total", countNew+countActive+countIdle)
		case c := <-s.new:
			conns[c] = http.StateNew
			countNew++
		case c := <-s.active:
			decConn(c)
			countActive++
			conns[c] = http.StateActive
		case c := <-s.idle:
			decConn(c)
			countIdle++
			conns[c] = http.StateIdle
			// if we're already stopping, close it
			if stopDone != nil {
				c.Close()
			}
		case c := <-s.closed:
			stats.BumpSum(s.stats, "conn.closed", 1)
			decConn(c)
			delete(conns, c)
			// if we're waiting to stop and are all empty, we just closed the last
			// connection and we're done.
			if stopDone != nil && len(conns) == 0 {
				close(stopDone)
				return
			}
		case stopDone = <-s.stop:
			// if we're already all empty, we're already done
			if len(conns) == 0 {
				close(stopDone)
				return
			}
			// close current idle connections right away
			for c, cs := range conns {
				if cs == http.StateIdle {
					c.Close()
				}
			}
			// continue the loop and wait for all the ConnState updates which will
			// eventually close(stopDone) and return from this goroutine.
		case killDone := <-s.kill:
			// force close all connections
			stats.BumpSum(s.stats, "kill.conn.count", float64(len(conns)))
			for c := range conns {
				c.Close()
			}
			// don't block the kill.
			close(killDone)
			//......... the rest of this code has been omitted .........