This page collects typical usage examples of the Python class exaproxy.util.log.logger.Logger. If you have been wondering what the Logger class does in practice and how to use it, the curated examples below should help.
Fifteen code examples of the Logger class are shown below, sorted by popularity by default.
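Before the examples, a minimal sketch of the Logger API as the snippets below exercise it. This is not from the ExaProxy source: the constructor's second argument appears to be an enable flag taken from the configuration (configuration.log.worker, configuration.log.server, and so on), so a plain boolean stands in for it here as an assumption.

from exaproxy.util.log.logger import Logger

log = Logger('demo', True)         # name plus what looks like an enable flag (assumed boolean)
log.debug('low-level detail')      # the four levels used throughout the examples below
log.info('normal operation')
log.error('something went wrong')
log.critical('fatal condition')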
Example 1: __init__
def __init__ (self, configuration, name, program, protocol):
    self.configuration = configuration
    self.http_parser = self.HTTPParser(configuration)
    self.tls_parser = self.TLSParser(configuration)
    self.enabled = bool(program is not None) and configuration.redirector.enable
    self._transparent = configuration.http.transparent
    self.log = Logger('worker ' + str(name), configuration.log.worker)
    self.usage = UsageLogger('usage', configuration.log.worker)

    self.response_factory = self.ResponseFactory()
    self.child_factory = self.ChildFactory(configuration, name)

    self.wid = name                        # a unique name
    self.creation = time.time()            # when the thread was created
    # self.last_worked = self.creation     # when the thread last picked a task

    self.program = program                 # the squid redirector program to fork
    self.running = True                    # the thread is active
    self.stats_timestamp = None            # time of the most recent outstanding request to generate stats

    self._proxy = 'ExaProxy-%s-id-%d' % (configuration.proxy.version, os.getpid())

    universal = configuration.redirector.protocol == 'url'

    # Do not move, we need the forking AFTER the setup
    if program:
        self.process = self.child_factory.createProcess(self.program, universal=universal)
    else:
        self.process = None
Example 2: __init__
def __init__(self, name, poller, read_name, max_clients):
    self.socks = {}
    self.name = name
    self.poller = poller
    self.read_name = read_name
    self.max_clients = max_clients
    self.client_count = 0
    self.saturated = False   # we are receiving more connections than we can handle
    self.binding = set()
    self.serving = True      # we are currently listening
    self.log = Logger('server', configuration.log.server)
    self.log.info('server [%s] accepting up to %d clients' % (name, max_clients))
Example 3: Server
class Server(object):
    _listen = staticmethod(listen)

    def __init__(self, name, poller, read_name, max_clients):
        self.socks = {}
        self.name = name
        self.poller = poller
        self.read_name = read_name
        self.max_clients = max_clients
        self.client_count = 0
        self.saturated = False   # we are receiving more connections than we can handle
        self.binding = set()
        self.serving = True      # we are currently listening
        self.log = Logger('server', configuration.log.server)
        self.log.info('server [%s] accepting up to %d clients' % (name, max_clients))

    def accepting (self):
        if self.serving:
            return True

        for ip, port, timeout, backlog in self.binding:
            try:
                self.log.critical('re-listening on %s:%d' % (ip, port))
                self.listen(ip, port, timeout, backlog)
            except socket.error, e:
                self.log.critical('could not re-listen on %s:%d : %s' % (ip, port, str(e)))
                return False

        self.serving = True
        return True
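For context, a hedged sketch of the rebinding step that accepting() performs when serving is False, reduced to plain standard-library sockets. The binding tuple layout (ip, port, timeout, backlog) mirrors the loop above; everything else is illustrative.

import socket

binding = {('127.0.0.1', 0, 5.0, 200)}   # ip, port, timeout, backlog; port 0 lets the OS pick a free port

def relisten():
    for ip, port, timeout, backlog in binding:   # timeout unused in this reduced sketch
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            s.bind((ip, port))
            s.listen(backlog)
        except socket.error:
            return False
    return True

print(relisten())   # True once every listener could be rebound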
Example 4: __init__
def __init__(self, poller, configuration):
    self.total_sent4 = 0L
    self.total_sent6 = 0L
    self.total_requested = 0L
    self.norequest = TimeCache(configuration.http.idle_connect)
    self.bysock = {}
    self.byname = {}
    self.buffered = []
    self._nextid = 0
    self.poller = poller
    self.log = Logger('client', configuration.log.client)
    self.proxied = configuration.http.proxied
    self.max_buffer = configuration.http.header_size
Example 5: preExec
class ChildFactory:
    def preExec (self):
        os.setpgrp()

    def __init__ (self, configuration, name):
        self.log = Logger('worker ' + str(name), configuration.log.worker)

    def createProcess (self, program, universal=False):
        try:
            process = subprocess.Popen([program],
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                universal_newlines=universal,
                preexec_fn=self.preExec,
            )
            self.log.debug('spawn process %s' % program)
        except KeyboardInterrupt:
            process = None
        except (subprocess.CalledProcessError, OSError, ValueError):
            self.log.error('could not spawn process %s' % program)
            process = None

        if process:
            try:
                fcntl.fcntl(process.stderr, fcntl.F_SETFL, os.O_NONBLOCK)
            except IOError:
                self.destroyProcess(process)
                process = None

        return process

    def destroyProcess (self, process):
        try:
            process.terminate()
            process.wait()
            self.log.info('terminated process PID %s' % process.pid)
        except OSError, e:
            # No such process
            if e[0] != errno.ESRCH:
                self.log.error('PID %s died' % process.pid)
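A hedged sketch of how a child created by createProcess is typically driven: squid-style redirectors read one request per line on stdin and answer one line on stdout. The request line format shown is the conventional squid one, and 'cat' is a hypothetical stand-in helper that simply echoes its input.

import subprocess

process = subprocess.Popen(['cat'],              # stand-in for a real redirector program
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    universal_newlines=True,
)

process.stdin.write('http://example.com/ 127.0.0.1/- - GET\n')  # URL ip/fqdn user method
process.stdin.flush()
print(process.stdout.readline().strip())         # the (possibly rewritten) URL comes back

process.terminate()
process.wait()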
Example 6: __init__
def __init__(self, supervisor, configuration):
    self.total_sent4 = 0L
    self.total_sent6 = 0L
    self.opening = {}
    self.established = {}
    self.byclientid = {}
    self.buffered = []
    self.retry = []
    self.configuration = configuration
    self.supervisor = supervisor
    self.poller = supervisor.poller
    self.log = Logger('download', configuration.log.download)
    self.location = os.path.realpath(os.path.normpath(configuration.web.html))
    self.page = supervisor.page
    self._header = {}
Example 7: __init__
def __init__ (self, poller, configuration, max_workers):
    self.poller = poller
    self.configuration = configuration
    self.resolver_factory = self.resolverFactory(configuration)

    # The actual work is done in the worker
    self.worker = self.resolver_factory.createUDPClient()

    # All currently active clients (one UDP and many TCP)
    self.workers = {
        self.worker.socket: self.worker
    }
    self.poller.addReadSocket('read_resolver', self.worker.socket)

    # Track the clients currently expecting results
    self.clients = {}    # client_id : identifier

    # Key should be the hostname rather than the request ID?
    self.resolving = {}  # identifier, worker_id :

    # TCP workers that have not yet sent a complete request
    self.sending = {}    # sock :

    # Maximum number of entries we will cache (1024 DNS lookups per second!)
    # assuming 1KB per entry, which is a lot, it means 20MB of memory;
    # at the default cache time of 900 seconds that is about 22 new hosts per second
    self.max_entries = 1024*20

    # track the current queries and when they were started
    self.active = []

    self.cache = {}
    self.cached = deque()

    self.max_workers = max_workers
    self.worker_count = len(self.workers)  # the UDP client

    self.waiting = []

    self.log = Logger('resolver', configuration.log.resolver)
    self.chained = {}
Example 8: __init__
def __init__ (self, configuration, poller):
    self.low = configuration.redirector.minimum    # minimum concurrent redirector workers
    self.high = configuration.redirector.maximum   # maximum concurrent redirector workers

    self.poller = poller
    self.configuration = configuration
    self.queue = Queue()      # store requests we do not immediately have the resources to process

    self.nextid = 1           # unique id to give to the next spawned worker
    self.worker = {}          # worker tasks for each spawned child
    self.processes = {}       # worker tasks indexed by file descriptors we can poll
    self.available = set()    # workers that are currently available to handle new requests
    self.active = {}          # workers that are currently busy waiting for a response from the spawned process
    self.stopping = set()     # workers we want to stop as soon as they stop being active

    program = configuration.redirector.program
    protocol = configuration.redirector.protocol
    self.redirector_factory = RedirectorFactory(configuration, program, protocol)

    self.log = Logger('manager', configuration.log.manager)
Example 9: __init__
def __init__ (self, configuration, name, request_box, program):
    self.configuration = configuration
    self.icap_parser = self.ICAPParser(configuration)
    self.enabled = configuration.redirector.enable
    self.protocol = configuration.redirector.protocol
    self._transparent = configuration.http.transparent
    self.log = Logger('worker ' + str(name), configuration.log.worker)
    self.usage = UsageLogger('usage', configuration.log.worker)

    self.universal = True if self.protocol == 'url' else False
    self.icap = self.protocol[len('icap://'):].split('/')[0] if self.protocol.startswith('icap://') else ''

    r, w = os.pipe()                                 # pipe for communication with the main thread
    self.response_box_write = os.fdopen(w, 'w', 0)   # results are written here
    self.response_box_read = os.fdopen(r, 'r', 0)    # read from the main thread

    self.wid = name                                  # a unique name
    self.creation = time.time()                      # when the thread was created
    # self.last_worked = self.creation               # when the thread last picked a task

    self.request_box = request_box                   # queue with HTTP headers to process
    self.program = program                           # the squid redirector program to fork
    self.running = True                              # the thread is active
    self.stats_timestamp = None                      # time of the most recent outstanding request to generate stats

    self._proxy = 'ExaProxy-%s-id-%d' % (configuration.proxy.version, os.getpid())

    if self.protocol == 'url':
        self.classify = self._classify_url
    if self.protocol.startswith('icap://'):
        self.classify = self._classify_icap

    # Do not move, we need the forking AFTER the setup
    self.process = self._createProcess()             # the forked program to handle classification
    Thread.__init__(self)
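The pipe setup above is worth isolating: results are pushed through an OS pipe so the main thread can poll the read end like any other file descriptor. A minimal sketch, with buffered file objects and an explicit flush standing in for the unbuffered mode used in the source (buffering 0 in text mode is Python 2 only):

import os

r, w = os.pipe()
response_box_write = os.fdopen(w, 'w')   # worker writes results here
response_box_read = os.fdopen(r, 'r')    # main thread reads them here

response_box_write.write('classified\n')
response_box_write.flush()               # stands in for the source's unbuffered write
print(response_box_read.readline().strip())   # -> classified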
Example 10: RedirectorManager
class RedirectorManager (object):
    def __init__ (self, configuration, poller):
        self.configuration = configuration

        self.low = configuration.redirector.minimum     # minimum number of workers at all times
        self.high = configuration.redirector.maximum    # maximum number of workers at all times

        self.program = configuration.redirector.program # what program speaks the squid redirector API

        self.nextid = 1          # incremental number to make the name of the next worker
        self.queue = Queue()     # queue with HTTP headers to process
        self.poller = poller     # poller interface that checks for events on sockets
        self.worker = {}         # our worker threads
        self.closing = set()     # workers that are currently closing
        self.running = True      # we are running

        self.log = Logger('manager', configuration.log.manager)

    def _getid(self):
        id = str(self.nextid)
        self.nextid += 1
        return id

    def _spawn (self):
        """add one worker to the pool"""
        wid = self._getid()
        worker = Redirector(self.configuration, wid, self.queue, self.program)
        self.poller.addReadSocket('read_workers', worker.response_box_read)
        self.worker[wid] = worker
        self.log.info("added a worker")
        self.log.info("we have %d workers. defined range is ( %d / %d )" % (len(self.worker), self.low, self.high))
        self.worker[wid].start()

    def spawn (self, number=1):
        """create the set number of workers"""
        self.log.info("spawning %d more workers" % number)
        for _ in range(number):
            self._spawn()

    def respawn (self):
        """make sure we reach the minimum number of workers"""
        number = max(min(len(self.worker), self.high), self.low)

        for wid in set(self.worker):
            self.reap(wid)

        self.spawn(number)

    def reap (self, wid):
        self.log.info('we are killing worker %s' % wid)
        worker = self.worker[wid]
        self.closing.add(wid)
        worker.stop()  # will cause the worker to stop when it can

    def decrease (self):
        if self.low < len(self.worker):
            worker = self._oldest()
            if worker:
                self.reap(worker.wid)

    def increase (self):
        if len(self.worker) < self.high:
            self.spawn()

    def start (self):
        """spawn our minimum number of workers"""
        self.log.info("starting workers.")
        self.spawn(max(0, self.low - len(self.worker)))

    def stop (self):
        """tell all our workers to stop reading the queue and stop"""
        self.running = False
        threads = self.worker.values()

        if len(self.worker):
            self.log.info("stopping %d workers." % len(self.worker))
            for wid in set(self.worker):
                self.reap(wid)
            for thread in threads:
                self.request(None, None, None, 'nop')
            for thread in threads:
                thread.destroyProcess()
                thread.join()

        self.worker = {}

    def _oldest (self):
        """find the oldest worker"""
        oldest = None
        past = time.time()
        for wid in set(self.worker):
            creation = self.worker[wid].creation
            if creation < past and wid not in self.closing:
                past = creation
                oldest = self.worker[wid]
        return oldest

    def provision (self):
        """manage our workers to make sure we have enough to consume the queue"""
        if not self.running:
            return

        num_workers = len(self.worker)
# ......... (remaining code omitted) .........
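A hedged sketch of the pool lifecycle the methods above suggest: start() fills the pool to its minimum, increase() and decrease() move it between low and high, and stop() drains it. Plain objects stand in here for the real Redirector threads.

class ToyPool(object):
    def __init__(self, low, high):
        self.low, self.high = low, high
        self.workers = []

    def start(self):
        self.spawn(max(0, self.low - len(self.workers)))

    def spawn(self, number=1):
        for _ in range(number):
            self.workers.append(object())

    def increase(self):
        if len(self.workers) < self.high:
            self.spawn()

    def decrease(self):
        if self.low < len(self.workers):
            self.workers.pop()

pool = ToyPool(low=2, high=5)
pool.start()               # pool now holds the minimum of 2
pool.increase()            # one more, still under the maximum of 5
print(len(pool.workers))   # -> 3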
Example 11: __init__
def __init__ (self, configuration):
    configuration = load()
    self.configuration = configuration

    # Only here so the introspection code can find them
    self.log = Logger('supervisor', configuration.log.supervisor)
    self.log.error('Starting exaproxy version %s' % configuration.proxy.version)

    self.signal_log = Logger('signal', configuration.log.signal)
    self.log_writer = SysLogWriter('log', configuration.log.destination, configuration.log.enable, level=configuration.log.level)
    self.usage_writer = UsageWriter('usage', configuration.usage.destination, configuration.usage.enable)

    sys.exitfunc = self.log_writer.writeMessages

    self.log_writer.setIdentifier(configuration.daemon.identifier)
    # self.usage_writer.setIdentifier(configuration.daemon.identifier)

    if configuration.debug.log:
        self.log_writer.toggleDebug()
        self.usage_writer.toggleDebug()

    self.log.error('python version %s' % sys.version.replace(os.linesep, ' '))
    self.log.debug('starting %s' % sys.argv[0])

    self.pid = PID(self.configuration)
    self.daemon = Daemon(self.configuration)
    self.poller = Poller(self.configuration.daemon)

    self.poller.setupRead('read_proxy')         # Listening proxy sockets
    self.poller.setupRead('read_web')           # Listening webserver sockets
    self.poller.setupRead('read_icap')          # Listening icap sockets
    self.poller.setupRead('read_redirector')    # Pipes carrying responses from the redirector process
    self.poller.setupRead('read_resolver')      # Sockets currently listening for DNS responses

    self.poller.setupRead('read_client')        # Active clients
    self.poller.setupRead('opening_client')     # Clients we have not yet read a request from
    self.poller.setupWrite('write_client')      # Active clients with buffered data to send
    self.poller.setupWrite('write_resolver')    # Active DNS requests with buffered data to send

    self.poller.setupRead('read_download')      # Established connections
    self.poller.setupWrite('write_download')    # Established connections we have buffered data to send to
    self.poller.setupWrite('opening_download')  # Opening connections

    self.monitor = Monitor(self)
    self.page = Page(self)
    self.content = ContentManager(self, configuration)
    self.client = ClientManager(self.poller, configuration)
    self.resolver = ResolverManager(self.poller, self.configuration, configuration.dns.retries*10)
    self.proxy = Server('http proxy', self.poller, 'read_proxy', configuration.http.connections)
    self.web = Server('web server', self.poller, 'read_web', configuration.web.connections)
    self.icap = Server('icap server', self.poller, 'read_icap', configuration.icap.connections)

    self._shutdown = True if self.daemon.filemax == 0 else False  # stop the program
    self._softstop = False           # stop once all current connections have been dealt with
    self._reload = False             # unimplemented
    self._toggle_debug = False       # start logging a lot
    self._decrease_spawn_limit = 0
    self._increase_spawn_limit = 0
    self._refork = False             # unimplemented
    self._pdb = False                # turn on pdb debugging
    self._listen = None              # listening change? None: no, True: listen, False: stop listening

    self.wait_time = 5.0             # how long we wait at most once we have been soft-killed
    self.local = set()               # what addresses are on our local interfaces

    if not self.initialise():
        self._shutdown = True
    elif self.daemon.drop_privileges():
        self.log.critical('Could not drop privileges to \'%s\'. Refusing to run as root' % self.daemon.user)
        self.log.critical('Set the environment value USER to change the unprivileged user')
        self._shutdown = True

    # fork the redirector process before performing any further setup
    redirector = fork_redirector(self.poller, self.configuration)

    # create threads _after_ all forking is done
    self.redirector = redirector_message_thread(redirector)

    self.reactor = Reactor(self.configuration, self.web, self.proxy, self.icap, self.redirector, self.content, self.client, self.resolver, self.log_writer, self.usage_writer, self.poller)

    self.interfaces()

    signal.signal(signal.SIGQUIT, self.sigquit)
    signal.signal(signal.SIGINT, self.sigterm)
    signal.signal(signal.SIGTERM, self.sigterm)
    # signal.signal(signal.SIGABRT, self.sigabrt)
    # signal.signal(signal.SIGHUP, self.sighup)
    signal.signal(signal.SIGTRAP, self.sigtrap)
    signal.signal(signal.SIGUSR1, self.sigusr1)
    signal.signal(signal.SIGUSR2, self.sigusr2)
    signal.signal(signal.SIGTTOU, self.sigttou)
    signal.signal(signal.SIGTTIN, self.sigttin)
    signal.signal(signal.SIGALRM, self.sigalrm)

    # make sure we always have data in history
    # (done in zero for dependency reasons)
# ......... (remaining code omitted) .........
Example 12: Supervisor
class Supervisor (object):
    alarm_time = 0.1                            # regular backend work
    second_frequency = int(1/alarm_time)        # when we record history
    minute_frequency = int(60/alarm_time)       # when we want to average history
    increase_frequency = int(5/alarm_time)      # when we add workers
    decrease_frequency = int(60/alarm_time)     # when we remove workers
    saturation_frequency = int(20/alarm_time)   # when we report connection saturation
    interface_frequency = int(300/alarm_time)   # when we check for new interfaces

    # import os
    # clear = [hex(ord(c)) for c in os.popen('clear').read()]
    # clear = ''.join([chr(int(c,16)) for c in ['0x1b', '0x5b', '0x48', '0x1b', '0x5b', '0x32', '0x4a']])

    def __init__ (self, configuration):
        configuration = load()
        self.configuration = configuration

        # Only here so the introspection code can find them
        self.log = Logger('supervisor', configuration.log.supervisor)
        self.log.error('Starting exaproxy version %s' % configuration.proxy.version)

        self.signal_log = Logger('signal', configuration.log.signal)
        self.log_writer = SysLogWriter('log', configuration.log.destination, configuration.log.enable, level=configuration.log.level)
        self.usage_writer = UsageWriter('usage', configuration.usage.destination, configuration.usage.enable)

        sys.exitfunc = self.log_writer.writeMessages

        self.log_writer.setIdentifier(configuration.daemon.identifier)
        # self.usage_writer.setIdentifier(configuration.daemon.identifier)

        if configuration.debug.log:
            self.log_writer.toggleDebug()
            self.usage_writer.toggleDebug()

        self.log.error('python version %s' % sys.version.replace(os.linesep, ' '))
        self.log.debug('starting %s' % sys.argv[0])

        self.pid = PID(self.configuration)
        self.daemon = Daemon(self.configuration)
        self.poller = Poller(self.configuration.daemon)

        self.poller.setupRead('read_proxy')         # Listening proxy sockets
        self.poller.setupRead('read_web')           # Listening webserver sockets
        self.poller.setupRead('read_icap')          # Listening icap sockets
        self.poller.setupRead('read_redirector')    # Pipes carrying responses from the redirector process
        self.poller.setupRead('read_resolver')      # Sockets currently listening for DNS responses

        self.poller.setupRead('read_client')        # Active clients
        self.poller.setupRead('opening_client')     # Clients we have not yet read a request from
        self.poller.setupWrite('write_client')      # Active clients with buffered data to send
        self.poller.setupWrite('write_resolver')    # Active DNS requests with buffered data to send

        self.poller.setupRead('read_download')      # Established connections
        self.poller.setupWrite('write_download')    # Established connections we have buffered data to send to
        self.poller.setupWrite('opening_download')  # Opening connections

        self.monitor = Monitor(self)
        self.page = Page(self)
        self.content = ContentManager(self, configuration)
        self.client = ClientManager(self.poller, configuration)
        self.resolver = ResolverManager(self.poller, self.configuration, configuration.dns.retries*10)
        self.proxy = Server('http proxy', self.poller, 'read_proxy', configuration.http.connections)
        self.web = Server('web server', self.poller, 'read_web', configuration.web.connections)
        self.icap = Server('icap server', self.poller, 'read_icap', configuration.icap.connections)

        self._shutdown = True if self.daemon.filemax == 0 else False  # stop the program
        self._softstop = False           # stop once all current connections have been dealt with
        self._reload = False             # unimplemented
        self._toggle_debug = False       # start logging a lot
        self._decrease_spawn_limit = 0
        self._increase_spawn_limit = 0
        self._refork = False             # unimplemented
        self._pdb = False                # turn on pdb debugging
        self._listen = None              # listening change? None: no, True: listen, False: stop listening

        self.wait_time = 5.0             # how long we wait at most once we have been soft-killed
        self.local = set()               # what addresses are on our local interfaces

        if not self.initialise():
            self._shutdown = True
        elif self.daemon.drop_privileges():
            self.log.critical('Could not drop privileges to \'%s\'. Refusing to run as root' % self.daemon.user)
            self.log.critical('Set the environment value USER to change the unprivileged user')
            self._shutdown = True

        # fork the redirector process before performing any further setup
        redirector = fork_redirector(self.poller, self.configuration)

        # create threads _after_ all forking is done
        self.redirector = redirector_message_thread(redirector)

        self.reactor = Reactor(self.configuration, self.web, self.proxy, self.icap, self.redirector, self.content, self.client, self.resolver, self.log_writer, self.usage_writer, self.poller)

        self.interfaces()

        signal.signal(signal.SIGQUIT, self.sigquit)
        signal.signal(signal.SIGINT, self.sigterm)
        signal.signal(signal.SIGTERM, self.sigterm)
        # signal.signal(signal.SIGABRT, self.sigabrt)
# ......... (remaining code omitted) .........
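The signal wiring at the end of this constructor follows a common daemon pattern: handlers only flip flags (such as _softstop), and the main loop acts on them later. A minimal self-contained sketch of that pattern, not taken from the ExaProxy source:

import signal

class Flags(object):
    softstop = False

flags = Flags()

def sigterm(signum, frame):
    flags.softstop = True    # noted here, acted upon by the main loop

signal.signal(signal.SIGTERM, sigterm)
print('SIGTERM handler installed')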
Example 13: load
"""
# http://code.google.com/speed/articles/web-metrics.html
import select
import socket
import errno
from exaproxy.network.errno_list import errno_block, errno_fatal
from interface import IPoller
from exaproxy.util.log.logger import Logger
from exaproxy.configuration import load
configuration = load()
log = Logger('select', configuration.log.server)
def poll_select(read, write, timeout=None):
try:
r, w, x = select.select(read, write, read + write, timeout)
except socket.error, e:
if e.args[0] in errno_block:
log.error('select not ready, errno %d: %s' % (e.args[0], errno.errorcode.get(e.args[0], '')))
return [], [], []
if e.args[0] in errno_fatal:
log.error('select problem, errno %d: %s' % (e.args[0], errno.errorcode.get(e.args[0], '')))
log.error('poller read : %s' % str(read))
log.error('poller write : %s' % str(write))
log.error('read : %s' % str(read))
else:
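A short, self-contained demonstration of the select call that poll_select wraps: three fd lists in, three fd lists out, with the error set built from both inputs as above. Standard library only.

import select
import socket

a, b = socket.socketpair()   # a connected pair (Unix domain)
b.send(b'x')                 # make `a` readable

read, write = [a], []
r, w, x = select.select(read, write, read + write, 1.0)
print(r == [a])              # -> True: `a` has data waiting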
Example 14: ResolverManager
class ResolverManager (object):
    resolverFactory = DNSResolver

    def __init__ (self, poller, configuration, max_workers):
        self.poller = poller
        self.configuration = configuration
        self.resolver_factory = self.resolverFactory(configuration)

        # The actual work is done in the worker
        self.worker = self.resolver_factory.createUDPClient()

        # All currently active clients (one UDP and many TCP)
        self.workers = {}
        self.workers[self.worker.socket] = self.worker
        self.poller.addReadSocket('read_resolver', self.worker.socket)

        # Track the clients currently expecting results
        self.clients = {}    # client_id : identifier

        # Key should be the hostname rather than the request ID?
        self.resolving = {}  # identifier, worker_id :

        # TCP workers that have not yet sent a complete request
        self.sending = {}    # sock :

        # Maximum number of entries we will cache (1024 DNS lookups per second!)
        # assuming 1KB per entry, which is a lot, it means 20MB of memory;
        # at the default cache time of 900 seconds that is about 22 new hosts per second
        self.max_entries = 1024*20

        # track the current queries and when they were started
        self.active = []

        self.cache = {}
        self.cached = deque()

        self.max_workers = max_workers
        self.worker_count = len(self.workers)  # the UDP client

        self.waiting = []

        self.log = Logger('resolver', configuration.log.resolver)
        self.chained = {}

    def cacheDestination (self, hostname, ip):
        if hostname not in self.cache:
            expire_time = time.time() + self.configuration.dns.ttl
            expire_time = expire_time - expire_time % 5  # group the DNS records into 5-second buckets

            latest_time, latest_hosts = self.cached[-1] if self.cached else (-1, None)

            if expire_time > latest_time:
                hosts = []
                self.cached.append((expire_time, hosts))
            else:
                hosts = latest_hosts

            self.cache[hostname] = ip
            hosts.append(hostname)

    def expireCache (self):
        # expire only one set of cache entries at a time
        if self.cached:
            current_time = time.time()
            expire_time, hosts = self.cached[0]

            if current_time >= expire_time or len(self.cache) > self.max_entries:
                expire_time, hosts = self.cached.popleft()
                for hostname in hosts:
                    self.cache.pop(hostname, None)

    def cleanup(self):
        now = time.time()
        cutoff = now - self.configuration.dns.timeout
        count = 0

        for timestamp, client_id, sock in self.active:
            if timestamp > cutoff:
                break

            count += 1
            cli_data = self.clients.pop(client_id, None)
            worker = self.workers.get(sock)
            tcpudp = 'udp' if worker is self.worker else 'tcp'

            if cli_data is not None:
                w_id, identifier, active_time, resolve_count = cli_data
                data = self.resolving.pop((w_id, identifier), None)
                if not data:
                    data = self.sending.pop(sock, None)

                if data:
                    client_id, original, hostname, command, decision = data
                    self.log.error('timeout when requesting address for %s using the %s client - attempt %s' % (hostname, tcpudp, resolve_count))

                    if resolve_count < self.configuration.dns.retries and worker is self.worker:
                        self.log.info('going to retransmit request for %s - attempt %s of %s' % (hostname, resolve_count+1, self.configuration.dns.retries))
                        self.startResolving(client_id, command, decision, resolve_count+1, identifier=identifier)
# ......... (remaining code omitted) .........
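The 5-second bucketing in cacheDestination is simple enough to verify in isolation: expiry timestamps are rounded down so that entries created close together expire as one group. A worked snippet of just that arithmetic:

import time

ttl = 900                                      # configuration.dns.ttl in the real code
expire_time = time.time() + ttl
expire_time = expire_time - expire_time % 5    # align to a 5-second bucket

print(expire_time % 5 == 0)                    # -> True: entries share bucket boundaries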
Example 15: Page
class Page (object):
    def __init__(self, supervisor):
        self.supervisor = supervisor
        self.monitor = supervisor.monitor
        self.email_sent = False
        self.log = Logger('web', supervisor.configuration.log.web)

    def _introspection (self, objects):
        introduction = '<div style="padding: 10px 10px 10px 10px; font-weight:bold;">Looking at the internal of ExaProxy for %s </div><br/>\n' % cgi.escape('.'.join(objects))
        link = cgi.escape('/'.join(objects[:-1])) if objects[:-1] else 'supervisor'
        line = ['<a href="/information/introspection/%s.html">Back to parent object</a><br/>' % link]
        for k, content in self.monitor.introspection(objects):
            link = '/information/introspection/%s.html' % cgi.escape('%s/%s' % ('/'.join(objects), k))
            line.append('<a href="%s">%s</a><span class="value">%s</span><br/>' % (link, k, cgi.escape(content)))
        return introduction + _listing % ('\n'.join(line))

    def _configuration (self):
        introduction = '<div style="padding: 10px 10px 10px 10px; font-weight:bold;">ExaProxy Configuration</div><br/>\n'
        line = []
        for k, v in sorted(self.monitor.configuration().items()):
            line.append('<span class="key">%s</span><span class="value"> %s</span><br/>' % (k, cgi.escape(str(v))))
        return introduction + _listing % ('\n'.join(line))

    def _statistics (self):
        introduction = '<div style="padding: 10px 10px 10px 10px; font-weight:bold;">ExaProxy Statistics</div><br/>\n'
        line = []
        for k, v in sorted(self.monitor.statistics().items()):
            line.append('<span class="key">%s</span><span class="value"> %s</span><br/>' % (k, cgi.escape(str(v))))
        return introduction + _listing % ('\n'.join(line))

    def _connections (self):
        return graph(
            self.monitor,
            'Connections',
            20000,
            [
                'clients.silent',
                'clients.speaking',
                'servers.opening',
                'servers.established',
            ]
        )

    def _processes (self):
        return graph(
            self.monitor,
            'Forked processes',
            20000,
            [
                'processes.forked',
                'processes.min',
                'processes.max',
            ]
        )

    def _requests (self):
        return graph(
            self.monitor,
            'Requests/seconds received from clients',
            20000,
            [
                'clients.requests',
            ],
            True,
        )

    def _clients (self):
        return graph(
            self.monitor,
            'Bits/seconds received from clients',
            20000,
            [
                'transfer.client4',
                'transfer.client6',
            ],
            True,
            adaptor=Bpstobps,
        )

    def _servers (self):
        return graph(
            self.monitor,
            'Bits/seconds received from servers',
            20000,
            [
                'transfer.content4',
                'transfer.content6',
            ],
            True,
            adaptor=Bpstobps,
        )

    def _transfer (self):
        return graph(
            self.monitor,
            'Bits/seconds received',
            20000,
            [
                'transfer.client',
# ......... (remaining code omitted) .........