本文整理汇总了Python中exaproxy.util.log.logger.Logger.debug方法的典型用法代码示例。如果您正苦于以下问题:Python Logger.debug方法的具体用法?Python Logger.debug怎么用?Python Logger.debug使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类exaproxy.util.log.logger.Logger
的用法示例。
在下文中一共展示了Logger.debug方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: preExec
# 需要导入模块: from exaproxy.util.log.logger import Logger [as 别名]
# 或者: from exaproxy.util.log.logger.Logger import debug [as 别名]
class ChildFactory:
def preExec (self):
os.setpgrp()
def __init__ (self, configuration, name):
self.log = Logger('worker ' + str(name), configuration.log.worker)
def createProcess (self, program, universal=False):
try:
process = subprocess.Popen([program],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=universal,
preexec_fn=self.preExec,
)
self.log.debug('spawn process %s' % program)
except KeyboardInterrupt:
process = None
except (subprocess.CalledProcessError,OSError,ValueError):
self.log.error('could not spawn process %s' % program)
process = None
if process:
try:
fcntl.fcntl(process.stderr, fcntl.F_SETFL, os.O_NONBLOCK)
except IOError:
self.destroyProcess(process)
process = None
return process
def destroyProcess (self, process):
try:
process.terminate()
process.wait()
self.log.info('terminated process PID %s' % process.pid)
except OSError, e:
# No such processs
if e[0] != errno.ESRCH:
self.log.error('PID %s died' % process.pid)
示例2: ClientManager
# 需要导入模块: from exaproxy.util.log.logger import Logger [as 别名]
# 或者: from exaproxy.util.log.logger.Logger import debug [as 别名]
#.........这里部分代码省略.........
self.poller.uncorkReadSocket('read_client', client.sock)
if res is not None:
buffered, had_buffer, sent4, sent6 = res
# buffered data we read with the HTTP headers
name, peer, request, content = client.readRelated(mode,nb_to_read)
if request:
self.total_requested += 1
self.log.info('reading multiple requests')
self.cleanup(client.sock, name)
buffered, had_buffer = None, None
content = None
elif request is None:
self.cleanup(client.sock, name)
buffered, had_buffer = None, None
content = None
else:
# we cannot write to the client so clean it up
self.cleanup(client.sock, name)
buffered, had_buffer = None, None
content = None
if buffered:
if client.sock not in self.buffered:
self.buffered.append(client.sock)
# watch for the socket's send buffer becoming less than full
self.poller.addWriteSocket('write_client', client.sock)
elif had_buffer and client.sock in self.buffered:
self.buffered.remove(client.sock)
# we no longer care about writing to the client
self.poller.removeWriteSocket('write_client', client.sock)
else:
content = None
return client, content, source
def corkUploadByName(self, name):
	"""Pause reading from the client registered under *name* (no-op if unknown)."""
	client, _ = self.byname.get(name, (None, None))
	if client:
		self.poller.corkReadSocket('read_client', client.sock)
def uncorkUploadByName(self, name):
	"""Resume reading from the named client, but only while it is still tracked."""
	client, _ = self.byname.get(name, (None, None))
	if client and client.sock in self.bysock:
		self.poller.uncorkReadSocket('read_client', client.sock)
def cleanup(self, sock, name):
	"""Drop every reference to a client connection and release its socket.

	The client may be registered in any of the three maps (bysock,
	norequest, byname); all are purged, the poller stops watching the
	socket, and the client is shut down.
	"""
	self.log.debug('cleanup for socket %s' % sock)
	client, source = self.bysock.get(sock, (None,None))
	client, source = (client,None) if client else self.norequest.get(sock, (None,None))
	# BUG FIX: this fallback previously read `(client,None) or self.byname.get(...)`.
	# A two-element tuple is always truthy, so byname was never consulted and
	# clients only known by name could not be cleaned up.
	client, source = (client,None) if client else self.byname.get(name, (None,None))
	self.bysock.pop(sock, None)
	self.norequest.pop(sock, (None,None))
	self.byname.pop(name, None)
	if client:
		self.poller.removeWriteSocket('write_client', client.sock)
		self.poller.removeReadSocket('read_client', client.sock)
		self.poller.removeReadSocket('opening_client', client.sock)
		client.shutdown()
	else:
		self.log.error('COULD NOT CLEAN UP SOCKET %s' % sock)
	if sock in self.buffered:
		self.buffered.remove(sock)
def softstop (self):
	"""Report whether a soft stop may complete: True once no client remains."""
	remaining = len(self.byname) + len(self.norequest)
	if remaining:
		return False
	self.log.critical('no more client connection, exiting.')
	return True
def stop(self):
	"""Shut every tracked client down and reset all bookkeeping state."""
	for mapping in (self.bysock, self.norequest):
		for client, _ in mapping.itervalues():
			client.shutdown()
	# stop watching every client-related event
	for event in ('read_client', 'opening_client'):
		self.poller.clearRead(event)
	self.poller.clearWrite('write_client')
	# start afresh with empty registries
	self.bysock = {}
	self.norequest = {}
	self.byname = {}
	self.buffered = []
示例3: Supervisor
# 需要导入模块: from exaproxy.util.log.logger import Logger [as 别名]
# 或者: from exaproxy.util.log.logger.Logger import debug [as 别名]
class Supervisor (object):
alarm_time = 0.1 # regular backend work
second_frequency = int(1/alarm_time) # when we record history
minute_frequency = int(60/alarm_time) # when we want to average history
increase_frequency = int(5/alarm_time) # when we add workers
decrease_frequency = int(60/alarm_time) # when we remove workers
saturation_frequency = int(20/alarm_time) # when we report connection saturation
interface_frequency = int(300/alarm_time) # when we check for new interfaces
# import os
# clear = [hex(ord(c)) for c in os.popen('clear').read()]
# clear = ''.join([chr(int(c,16)) for c in ['0x1b', '0x5b', '0x48', '0x1b', '0x5b', '0x32', '0x4a']])
def __init__ (self,configuration):
configuration = load()
self.configuration = configuration
# Only here so the introspection code can find them
self.log = Logger('supervisor', configuration.log.supervisor)
self.log.error('Starting exaproxy version %s' % configuration.proxy.version)
self.signal_log = Logger('signal', configuration.log.signal)
self.log_writer = SysLogWriter('log', configuration.log.destination, configuration.log.enable, level=configuration.log.level)
self.usage_writer = UsageWriter('usage', configuration.usage.destination, configuration.usage.enable)
sys.exitfunc = self.log_writer.writeMessages
self.log_writer.setIdentifier(configuration.daemon.identifier)
#self.usage_writer.setIdentifier(configuration.daemon.identifier)
if configuration.debug.log:
self.log_writer.toggleDebug()
self.usage_writer.toggleDebug()
self.log.error('python version %s' % sys.version.replace(os.linesep,' '))
self.log.debug('starting %s' % sys.argv[0])
self.pid = PID(self.configuration)
self.daemon = Daemon(self.configuration)
self.poller = Poller(self.configuration.daemon)
self.poller.setupRead('read_proxy') # Listening proxy sockets
self.poller.setupRead('read_web') # Listening webserver sockets
self.poller.setupRead('read_icap') # Listening icap sockets
self.poller.setupRead('read_redirector') # Pipes carrying responses from the redirector process
self.poller.setupRead('read_resolver') # Sockets currently listening for DNS responses
self.poller.setupRead('read_client') # Active clients
self.poller.setupRead('opening_client') # Clients we have not yet read a request from
self.poller.setupWrite('write_client') # Active clients with buffered data to send
self.poller.setupWrite('write_resolver') # Active DNS requests with buffered data to send
self.poller.setupRead('read_download') # Established connections
self.poller.setupWrite('write_download') # Established connections we have buffered data to send to
self.poller.setupWrite('opening_download') # Opening connections
self.monitor = Monitor(self)
self.page = Page(self)
self.content = ContentManager(self,configuration)
self.client = ClientManager(self.poller, configuration)
self.resolver = ResolverManager(self.poller, self.configuration, configuration.dns.retries*10)
self.proxy = Server('http proxy',self.poller,'read_proxy', configuration.http.connections)
self.web = Server('web server',self.poller,'read_web', configuration.web.connections)
self.icap = Server('icap server',self.poller,'read_icap', configuration.icap.connections)
self._shutdown = True if self.daemon.filemax == 0 else False # stop the program
self._softstop = False # stop once all current connection have been dealt with
self._reload = False # unimplemented
self._toggle_debug = False # start logging a lot
self._decrease_spawn_limit = 0
self._increase_spawn_limit = 0
self._refork = False # unimplemented
self._pdb = False # turn on pdb debugging
self._listen = None # listening change ? None: no, True: listen, False: stop listeing
self.wait_time = 5.0 # how long do we wait at maximum once we have been soft-killed
self.local = set() # what addresses are on our local interfaces
if not self.initialise():
self._shutdown = True
elif self.daemon.drop_privileges():
self.log.critical('Could not drop privileges to \'%s\'. Refusing to run as root' % self.daemon.user)
self.log.critical('Set the environment value USER to change the unprivileged user')
self._shutdown = True
# fork the redirector process before performing any further setup
redirector = fork_redirector(self.poller, self.configuration)
# create threads _after_ all forking is done
self.redirector = redirector_message_thread(redirector)
self.reactor = Reactor(self.configuration, self.web, self.proxy, self.icap, self.redirector, self.content, self.client, self.resolver, self.log_writer, self.usage_writer, self.poller)
self.interfaces()
signal.signal(signal.SIGQUIT, self.sigquit)
signal.signal(signal.SIGINT, self.sigterm)
signal.signal(signal.SIGTERM, self.sigterm)
# signal.signal(signal.SIGABRT, self.sigabrt)
#.........这里部分代码省略.........
示例4: ContentManager
# 需要导入模块: from exaproxy.util.log.logger import Logger [as 别名]
# 或者: from exaproxy.util.log.logger.Logger import debug [as 别名]
class ContentManager(object):
downloader_factory = Content
def __init__(self, supervisor, configuration):
self.total_sent4 = 0L
self.total_sent6 = 0L
self.opening = {}
self.established = {}
self.byclientid = {}
self.buffered = []
self.retry = []
self.configuration = configuration
self.supervisor = supervisor
self.poller = supervisor.poller
self.log = Logger('download', configuration.log.download)
self.location = os.path.realpath(os.path.normpath(configuration.web.html))
self.page = supervisor.page
self._header = {}
def hasClient(self, client_id):
	"""Return True when *client_id* already has an associated downloader."""
	known = self.byclientid
	return client_id in known
def getLocalContent(self, code, name):
	"""Serve a file from the configured local web root.

	Returns a (command, data) pair: ('file', (header, filename)) on
	success, or ('close', <http response>) when the file is missing,
	unreadable, or the path escapes the web root.
	"""
	filename = os.path.normpath(os.path.join(self.location, name))
	# refuse any path that resolves outside the web root (eg. via '..')
	if not filename.startswith(self.location + os.path.sep):
		filename = ''
	if os.path.isfile(filename):
		try:
			stat = os.stat(filename)
		except (IOError, OSError):
			# BUG FIX: os.stat() raises OSError, which the previous
			# IOError-only clause could never catch (the file may vanish
			# between the isfile() check and the stat() call)
			# NOTE: we are always returning an HTTP/1.1 response
			content = 'close', http(501, 'local file is inaccessible %s' % str(filename))
		else:
			if filename in self._header:
				cache_time, header = self._header[filename]
			else:
				cache_time, header = None, None
			if cache_time is None or cache_time < stat.st_mtime:
				header = file_header(code, stat.st_size, filename)
				# BUG FIX: cache the mtime (not the size) so the header is
				# regenerated when the file changes on disk; the cached
				# value is compared against st_mtime above
				self._header[filename] = stat.st_mtime, header
			content = 'file', (header, filename)
	else:
		self.log.debug('local file is missing for %s: %s' % (str(name), str(filename)))
		# NOTE: we are always returning an HTTP/1.1 response
		content = 'close', http(501, 'could not serve missing file %s' % str(filename))
	return content
def readLocalContent(self, code, reason, data={}):
	"""Render a local template file, interpolating *data*, as an HTTP response."""
	filename = os.path.normpath(os.path.join(self.location, reason))
	# refuse any path that resolves outside the web root (eg. via '..')
	if not filename.startswith(self.location + os.path.sep):
		filename = ''
	if os.path.isfile(filename):
		try:
			with open(filename) as fd:
				body = fd.read() % data
			# NOTE: we are always returning an HTTP/1.1 response
			return 'close', http(code, body)
		except IOError:
			# fall through to the missing-file response below
			pass
	self.log.debug('local file is missing for %s: %s' % (str(reason), str(filename)))
	# NOTE: we are always returning an HTTP/1.1 response
	return 'close', http(501, 'could not serve missing file %s' % str(reason))
def getDownloader(self, client_id, host, port, command, request):
downloader = self.byclientid.get(client_id, None)
if downloader:
# NOTE: with pipeline, consequent request could go to other sites if the browser knows we are a proxy
# NOTE: therefore the second request could reach the first site
# NOTE: and we could kill the connection before the data is fully back to the client
# NOTE: in practice modern browser are too clever and test for it !
if host != downloader.host or port != downloader.port:
self.endClientDownload(client_id)
downloader = None
else:
newdownloader = False
if isipv4(host):
bind = self.configuration.tcp4.bind
elif isipv6(host):
bind = self.configuration.tcp6.bind
else:
# should really never happen
self.log.critical('the host IP address is neither IPv4 or IPv6 .. what year is it ?')
return None, False
if downloader is None:
#.........这里部分代码省略.........
示例5: __exit
# 需要导入模块: from exaproxy.util.log.logger import Logger [as 别名]
# 或者: from exaproxy.util.log.logger.Logger import debug [as 别名]
__exit(configuration.debug.memory,0)
try:
import cProfile as profile
except:
import profile
if not configuration.profile.destination or configuration.profile.destination == 'stdout':
profile.run('Supervisor().run()')
__exit(configuration.debug.memory,0)
notice = ''
profiled = configuration.profile.destination
if os.path.isdir(profiled):
notice = 'profile can not use this filename as outpout, it is not a directory (%s)' % profiled
if os.path.exists(configuration.profile.destination):
notice = 'profile can not use this filename as outpout, it already exists (%s)' % profiled
if not notice:
log.debug('profiling ....')
profile.run('main()',filename=configuration.profile.destination)
else:
log.debug("-"*len(notice))
log.debug(notice)
log.debug("-"*len(notice))
main()
__exit(configuration.debug.memory,0)
# Standard entry-point guard: only start the proxy when executed directly,
# never on import.
if __name__ == '__main__':
	main()
示例6: Redirector
# 需要导入模块: from exaproxy.util.log.logger import Logger [as 别名]
# 或者: from exaproxy.util.log.logger.Logger import debug [as 别名]
class Redirector (Thread):
# TODO : if the program is a function, fork and run :)
def __init__ (self, configuration, name, request_box, program):
	"""Prepare a redirector worker thread.

	configuration -- global proxy configuration object
	name          -- unique identifier for this worker (used in log names)
	request_box   -- queue of HTTP headers awaiting classification
	program       -- path of the squid-style redirector helper to fork
	"""
	self.configuration = configuration
	self.enabled = configuration.redirector.enable
	self.protocol = configuration.redirector.protocol
	self._transparent = configuration.http.transparent
	self.log = Logger('worker ' + str(name), configuration.log.worker)
	self.usage = UsageLogger('usage', configuration.log.worker)
	# 'url' helpers speak line-based text, hence universal newlines
	self.universal = True if self.protocol == 'url' else False
	# host part of the icap:// URL, empty string when not using ICAP
	self.icap = self.protocol[len('icap://'):].split('/')[0] if self.protocol.startswith('icap://') else ''
	r, w = os.pipe() # pipe for communication with the main thread
	self.response_box_write = os.fdopen(w,'w',0) # results are written here (unbuffered)
	self.response_box_read = os.fdopen(r,'r',0) # read from the main thread (unbuffered)
	self.wid = name # a unique name
	self.creation = time.time() # when the thread was created
	# self.last_worked = self.creation # when the thread last picked a task
	self.request_box = request_box # queue with HTTP headers to process
	self.program = program # the squid redirector program to fork
	self.running = True # the thread is active
	self.stats_timestamp = None # time of the most recent outstanding request to generate stats
	self._proxy = 'ExaProxy-%s-id-%d' % (configuration.proxy.version,os.getpid())
	# select the classifier implementation matching the configured protocol
	if self.protocol == 'url':
		self.classify = self._classify_url
	if self.protocol.startswith('icap://'):
		self.classify = self._classify_icap
	# Do not move, we need the forking AFTER the setup
	self.process = self._createProcess() # the forked program to handle classification
	Thread.__init__(self)
def _createProcess (self):
if not self.enabled:
return
def preexec(): # Don't forward signals.
os.setpgrp()
try:
process = subprocess.Popen([self.program,],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=self.universal,
preexec_fn=preexec,
)
self.log.debug('spawn process %s' % self.program)
except KeyboardInterrupt:
process = None
except (subprocess.CalledProcessError,OSError,ValueError):
self.log.error('could not spawn process %s' % self.program)
process = None
if process:
try:
fcntl.fcntl(process.stderr, fcntl.F_SETFL, os.O_NONBLOCK)
except IOError:
self.destroyProcess()
process = None
return process
def destroyProcess (self):
if not self.enabled:
return
self.log.debug('destroying process %s' % self.program)
if not self.process:
return
try:
if self.process:
self.process.terminate()
self.process.wait()
self.log.info('terminated process PID %s' % self.process.pid)
except OSError, e:
# No such processs
if e[0] != errno.ESRCH:
self.log.error('PID %s died' % self.process.pid)