本文整理汇总了Python中exaproxy.util.log.logger.Logger.error方法的典型用法代码示例。如果您正苦于以下问题:Python Logger.error方法的具体用法?Python Logger.error怎么用?Python Logger.error使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类exaproxy.util.log.logger.Logger
的用法示例。
在下文中一共展示了Logger.error方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: preExec
# 需要导入模块: from exaproxy.util.log.logger import Logger [as 别名]
# 或者: from exaproxy.util.log.logger.Logger import error [as 别名]
class ChildFactory:
    """Spawn and tear down external helper processes for a worker.

    Children are started in their own process group (via ``preExec``) so
    that signals delivered to the proxy are not forwarded to them.
    """

    def preExec (self):
        # Runs in the child between fork and exec: detach from our
        # process group so the child does not receive our signals.
        os.setpgrp()

    def __init__ (self, configuration, name):
        # One logger per worker, named after the worker id.
        self.log = Logger('worker ' + str(name), configuration.log.worker)

    def createProcess (self, program, universal=False):
        """Fork *program* with piped stdio and a non-blocking stderr.

        Returns the ``subprocess.Popen`` object, or None if the spawn or
        the fcntl setup failed.
        """
        try:
            child = subprocess.Popen([program],
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                universal_newlines=universal,
                preexec_fn=self.preExec,
            )
            self.log.debug('spawn process %s' % program)
        except KeyboardInterrupt:
            child = None
        except (subprocess.CalledProcessError, OSError, ValueError):
            self.log.error('could not spawn process %s' % program)
            child = None
        if child:
            # stderr is polled opportunistically, so it must not block.
            try:
                fcntl.fcntl(child.stderr, fcntl.F_SETFL, os.O_NONBLOCK)
            except IOError:
                self.destroyProcess(child)
                child = None
        return child

    def destroyProcess (self, process):
        """Terminate *process* and reap it, logging the outcome."""
        try:
            process.terminate()
            process.wait()
            self.log.info('terminated process PID %s' % process.pid)
        except OSError as exc:
            # ESRCH means the process was already gone; anything else
            # is unexpected and gets logged.
            if exc[0] != errno.ESRCH:
                self.log.error('PID %s died' % process.pid)
示例2: ClientManager
# 需要导入模块: from exaproxy.util.log.logger import Logger [as 别名]
# 或者: from exaproxy.util.log.logger.Logger import error [as 别名]
class ClientManager (object):
    """Track client connections to the proxy: sockets awaiting their first
    request (``norequest``), established clients (``bysock``) and a
    name-to-client index (``byname``)."""

    # parser for the PROXY protocol preamble, shared by all instances
    unproxy = ProxyProtocol().parseRequest

    def __init__(self, poller, configuration):
        self.total_sent4 = 0L      # bytes sent over IPv4 (long: py2 counters)
        self.total_sent6 = 0L      # bytes sent over IPv6
        self.total_requested = 0L  # number of requests read from clients
        # clients that connected but have not yet sent a request;
        # entries expire after http.idle_connect
        self.norequest = TimeCache(configuration.http.idle_connect)
        self.bysock = {}
        self.byname = {}
        self.buffered = []
        self._nextid = 0           # monotonically increasing client id
        self.poller = poller
        self.log = Logger('client', configuration.log.client)
        self.proxied = configuration.http.proxied      # PROXY protocol enabled?
        self.max_buffer = configuration.http.header_size

    def __contains__(self, item):
        return item in self.byname

    def getnextid(self):
        # ids are handed out as strings
        self._nextid += 1
        return str(self._nextid)

    def expire (self,number=100):
        """Clean up to *number* idle connections that never sent a request;
        return how many were closed."""
        count = 0
        for sock in self.norequest.expired(number):
            client = self.norequest.get(sock,[None,])[0]
            if client:
                self.cleanup(sock,client.name)
                count += 1
        return count

    def newConnection(self, sock, peer, source):
        """Register a freshly accepted connection and watch it for its
        opening request."""
        name = self.getnextid()
        client = Client(name, sock, peer, self.log, self.max_buffer)
        self.norequest[sock] = client, source
        self.byname[name] = client, source
        # watch for the opening request
        self.poller.addReadSocket('opening_client', client.sock)
        #self.log.info('new id %s (socket %s) in clients : %s' % (name, sock, sock in self.bysock))
        return peer

    def readRequest(self, sock):
        """Read only the initial HTTP headers sent by the client"""
        client, source = self.norequest.get(sock, (None, None))
        if client:
            name, peer, request, content = client.readData()
            if request:
                self.total_requested += 1
                # headers can be read only once
                self.norequest.pop(sock, (None, None))
                # we have now read the client's opening request
                self.poller.removeReadSocket('opening_client', client.sock)
            elif request is None:
                # None signals a dead/closed connection
                self.cleanup(sock, client.name)
        else:
            self.log.error('trying to read headers from a client that does not exist %s' % sock)
            name, peer, request, content, source = None, None, None, None, None
        # when the PROXY protocol is in use, unwrap it to recover the
        # real client address and the real request
        if request and self.proxied is True and source == 'proxy':
            client_ip, client_request = self.unproxy(request)
            if client_ip and client_request:
                peer = client_ip
                request = client_request
                client.setPeer(client_ip)
        return name, peer, request, content, source

    def readDataBySocket(self, sock):
        """Read more data from an established client identified by socket."""
        client, source = self.bysock.get(sock, (None, None))
        if client:
            name, peer, request, content = client.readData()
            if request:
                self.total_requested += 1
                # Parsing of the new request will be handled asynchronously. Ensure that
                # we do not read anything from the client until a request has been sent
                # to the remote webserver.
                # Since we just read a request, we know that the cork is not currently
                # set and so there's no risk of it being erroneously removed.
                self.poller.corkReadSocket('read_client', sock)
            elif request is None:
                self.cleanup(sock, client.name)
        else:
            self.log.error('trying to read from a client that does not exist %s' % sock)
            name, peer, request, content = None, None, None, None
        return name, peer, request, content, source
#.........这里部分代码省略.........
示例3: Supervisor
# 需要导入模块: from exaproxy.util.log.logger import Logger [as 别名]
# 或者: from exaproxy.util.log.logger.Logger import error [as 别名]
class Supervisor (object):
    """Top-level object of the proxy: builds the poller, the managers
    (content, client, resolver, redirector) and the listening servers,
    and installs signal handlers.  (Truncated in this view.)"""

    alarm_time = 0.1 # regular backend work
    second_frequency = int(1/alarm_time) # when we record history
    minute_frequency = int(60/alarm_time) # when we want to average history
    increase_frequency = int(5/alarm_time) # when we add workers
    decrease_frequency = int(60/alarm_time) # when we remove workers
    saturation_frequency = int(20/alarm_time) # when we report connection saturation
    interface_frequency = int(300/alarm_time) # when we check for new interfaces

    # import os
    # clear = [hex(ord(c)) for c in os.popen('clear').read()]
    # clear = ''.join([chr(int(c,16)) for c in ['0x1b', '0x5b', '0x48', '0x1b', '0x5b', '0x32', '0x4a']])

    def __init__ (self,configuration):
        # NOTE(review): the passed-in configuration is immediately replaced
        # by a fresh load() — the parameter value is never used
        configuration = load()
        self.configuration = configuration

        # Only here so the introspection code can find them
        self.log = Logger('supervisor', configuration.log.supervisor)
        # logged with error() — presumably so it shows at default log levels
        self.log.error('Starting exaproxy version %s' % configuration.proxy.version)

        self.signal_log = Logger('signal', configuration.log.signal)
        self.log_writer = SysLogWriter('log', configuration.log.destination, configuration.log.enable, level=configuration.log.level)
        self.usage_writer = UsageWriter('usage', configuration.usage.destination, configuration.usage.enable)

        # flush pending log messages on interpreter exit (py2-only hook)
        sys.exitfunc = self.log_writer.writeMessages

        self.log_writer.setIdentifier(configuration.daemon.identifier)
        #self.usage_writer.setIdentifier(configuration.daemon.identifier)

        if configuration.debug.log:
            self.log_writer.toggleDebug()
            self.usage_writer.toggleDebug()

        self.log.error('python version %s' % sys.version.replace(os.linesep,' '))
        self.log.debug('starting %s' % sys.argv[0])

        self.pid = PID(self.configuration)
        self.daemon = Daemon(self.configuration)

        # one poller instance with a named category per socket role
        self.poller = Poller(self.configuration.daemon)
        self.poller.setupRead('read_proxy')       # Listening proxy sockets
        self.poller.setupRead('read_web')         # Listening webserver sockets
        self.poller.setupRead('read_icap')        # Listening icap sockets
        self.poller.setupRead('read_redirector')  # Pipes carrying responses from the redirector process
        self.poller.setupRead('read_resolver')    # Sockets currently listening for DNS responses
        self.poller.setupRead('read_client')      # Active clients
        self.poller.setupRead('opening_client')   # Clients we have not yet read a request from
        self.poller.setupWrite('write_client')    # Active clients with buffered data to send
        self.poller.setupWrite('write_resolver')  # Active DNS requests with buffered data to send
        self.poller.setupRead('read_download')    # Established connections
        self.poller.setupWrite('write_download')  # Established connections we have buffered data to send to
        self.poller.setupWrite('opening_download') # Opening connections

        self.monitor = Monitor(self)
        self.page = Page(self)
        self.content = ContentManager(self,configuration)
        self.client = ClientManager(self.poller, configuration)
        self.resolver = ResolverManager(self.poller, self.configuration, configuration.dns.retries*10)
        self.proxy = Server('http proxy',self.poller,'read_proxy', configuration.http.connections)
        self.web = Server('web server',self.poller,'read_web', configuration.web.connections)
        self.icap = Server('icap server',self.poller,'read_icap', configuration.icap.connections)

        # runtime state flags driven by signal handlers
        self._shutdown = True if self.daemon.filemax == 0 else False # stop the program
        self._softstop = False # stop once all current connection have been dealt with
        self._reload = False # unimplemented
        self._toggle_debug = False # start logging a lot
        self._decrease_spawn_limit = 0
        self._increase_spawn_limit = 0
        self._refork = False # unimplemented
        self._pdb = False # turn on pdb debugging
        self._listen = None # listening change ? None: no, True: listen, False: stop listening

        self.wait_time = 5.0 # how long do we wait at maximum once we have been soft-killed
        self.local = set() # what addresses are on our local interfaces

        if not self.initialise():
            self._shutdown = True
        elif self.daemon.drop_privileges():
            self.log.critical('Could not drop privileges to \'%s\'. Refusing to run as root' % self.daemon.user)
            self.log.critical('Set the environment value USER to change the unprivileged user')
            self._shutdown = True

        # fork the redirector process before performing any further setup
        redirector = fork_redirector(self.poller, self.configuration)

        # create threads _after_ all forking is done
        self.redirector = redirector_message_thread(redirector)

        self.reactor = Reactor(self.configuration, self.web, self.proxy, self.icap, self.redirector, self.content, self.client, self.resolver, self.log_writer, self.usage_writer, self.poller)

        self.interfaces()

        signal.signal(signal.SIGQUIT, self.sigquit)
        signal.signal(signal.SIGINT, self.sigterm)
        signal.signal(signal.SIGTERM, self.sigterm)
        # signal.signal(signal.SIGABRT, self.sigabrt)
#.........这里部分代码省略.........
示例4: poll
# 需要导入模块: from exaproxy.util.log.logger import Logger [as 别名]
# 或者: from exaproxy.util.log.logger.Logger import error [as 别名]
try:
poll([f], [], [f], 0.1)
except socket.error:
print "CANNOT POLL (read): %s" % str(f)
log.error('can not poll (read) : %s' % str(f))
for f in write:
try:
poll([], [f], [f], 0.1)
except socket.error:
print "CANNOT POLL (write): %s" % str(f)
log.error('can not poll (write) : %s' % str(f))
raise e
except (ValueError, AttributeError, TypeError), e:
log.error("fatal error encountered during select - %s %s" % (type(e),str(e)))
raise e
except select.error, e:
if e.args[0] in errno_block:
return [], [], []
log.error("fatal error encountered during select - %s %s" % (type(e),str(e)))
raise e
except KeyboardInterrupt,e:
raise e
except Exception, e:
log.critical("fatal error encountered during select - %s %s" % (type(e),str(e)))
raise e
return r, w, x
示例5: ResolverManager
# 需要导入模块: from exaproxy.util.log.logger import Logger [as 别名]
# 或者: from exaproxy.util.log.logger.Logger import error [as 别名]
class ResolverManager (object):
    """Manage DNS resolution: one shared UDP client plus on-demand TCP
    clients, with a bounded, time-bucketed answer cache.
    (Truncated in this view.)"""

    resolverFactory = DNSResolver

    def __init__ (self, poller, configuration, max_workers):
        self.poller = poller
        self.configuration = configuration

        self.resolver_factory = self.resolverFactory(configuration)

        # The actual work is done in the worker
        self.worker = self.resolver_factory.createUDPClient()

        # All currently active clients (one UDP and many TCP)
        self.workers = {}
        self.workers[self.worker.socket] = self.worker
        self.poller.addReadSocket('read_resolver', self.worker.socket)

        # Track the clients currently expecting results
        self.clients = {} # client_id : identifier

        # Key should be the hostname rather than the request ID?
        self.resolving = {} # identifier, worker_id :

        # TCP workers that have not yet sent a complete request
        self.sending = {} # sock :

        # Maximum number of entries we will cache (1024 DNS lookups per second !)
        # assuming 1k per entry, which is a lot, it means 20Mb of memory
        # which at the default of 900 seconds of cache is 22 new hosts per second
        self.max_entries = 1024*20

        # track the current queries and when they were started
        self.active = []

        self.cache = {}
        self.cached = deque()   # (expire_time, [hostnames]) buckets, oldest first

        self.max_workers = max_workers
        self.worker_count = len(self.workers) # the UDP client

        self.waiting = []

        self.log = Logger('resolver', configuration.log.resolver)
        self.chained = {}

    def cacheDestination (self, hostname, ip):
        """Cache hostname -> ip, grouping expirations into 5-second buckets
        so expiry can drop a whole bucket at once."""
        if hostname not in self.cache:
            expire_time = time.time() + self.configuration.dns.ttl
            expire_time = expire_time - expire_time % 5 # group the DNS record per buckets 5 seconds
            latest_time, latest_hosts = self.cached[-1] if self.cached else (-1, None)

            if expire_time > latest_time:
                # new, later bucket
                hosts = []
                self.cached.append((expire_time, hosts))
            else:
                # same bucket as the most recent entry
                hosts = latest_hosts

            self.cache[hostname] = ip
            hosts.append(hostname)

    def expireCache (self):
        # expire only one set of cache entries at a time
        if self.cached:
            current_time = time.time()
            expire_time, hosts = self.cached[0]

            # drop the oldest bucket when it is due, or when the cache
            # has grown past max_entries
            if current_time >= expire_time or len(self.cache) > self.max_entries:
                expire_time, hosts = self.cached.popleft()

                for hostname in hosts:
                    self.cache.pop(hostname, None)

    def cleanup(self):
        """Time out stale in-flight queries; retransmit over UDP while
        retries remain.  (Truncated in this view.)"""
        now = time.time()
        cutoff = now - self.configuration.dns.timeout
        count = 0

        # self.active is ordered oldest-first, so stop at the first
        # entry that is still fresh
        for timestamp, client_id, sock in self.active:
            if timestamp > cutoff:
                break

            count += 1
            cli_data = self.clients.pop(client_id, None)
            worker = self.workers.get(sock)
            tcpudp = 'udp' if worker is self.worker else 'tcp'

            if cli_data is not None:
                w_id, identifier, active_time, resolve_count = cli_data
                data = self.resolving.pop((w_id, identifier), None)
                if not data:
                    # the request may still be buffered, unsent
                    data = self.sending.pop(sock, None)

                if data:
                    client_id, original, hostname, command, decision = data
                    self.log.error('timeout when requesting address for %s using the %s client - attempt %s' % (hostname, tcpudp, resolve_count))

                    # only the shared UDP client retransmits
                    if resolve_count < self.configuration.dns.retries and worker is self.worker:
                        self.log.info('going to retransmit request for %s - attempt %s of %s' % (hostname, resolve_count+1, self.configuration.dns.retries))
                        self.startResolving(client_id, command, decision, resolve_count+1, identifier=identifier)
#.........这里部分代码省略.........
示例6: RedirectorManager
# 需要导入模块: from exaproxy.util.log.logger import Logger [as 别名]
# 或者: from exaproxy.util.log.logger.Logger import error [as 别名]
class RedirectorManager (object):
    """Maintain a pool of redirector worker processes between a configured
    minimum and maximum, spawning and reaping them on demand.
    (Truncated in this view.)"""

    def __init__ (self, configuration, poller):
        self.low = configuration.redirector.minimum     # minimum concurrent redirector workers
        self.high = configuration.redirector.maximum    # maximum concurrent redirector workers

        self.poller = poller
        self.configuration = configuration
        self.queue = Queue()    # store requests we do not immediately have the resources to process

        self.nextid = 1         # unique id to give to the next spawned worker
        self.worker = {}        # worker tasks for each spawned child
        self.processes = {}     # worker tasks indexed by file descriptors we can poll
        self.available = set()  # workers that are currently available to handle new requests
        self.active = {}        # workers that are currently busy waiting for a response from the spawned process
        self.stopping = set()   # workers we want to stop as soon as they stop being active

        program = configuration.redirector.program
        protocol = configuration.redirector.protocol
        self.redirector_factory = RedirectorFactory(configuration, program, protocol)

        self.log = Logger('manager', configuration.log.manager)

    def _getid(self):
        # worker ids are handed out as strings
        wid = str(self.nextid)
        self.nextid += 1
        return wid

    def _spawn (self):
        """add one worker to the pool"""
        wid = self._getid()
        worker = self.redirector_factory.create(wid)
        self.worker[wid] = worker
        self.available.add(wid)

        if worker.process is not None:
            # poll the child's stdout for its responses
            identifier = worker.process.stdout
            self.processes[identifier] = worker
            self.poller.addReadSocket('read_workers', identifier)

        self.log.info("added a worker")
        self.log.info("we have %d workers. defined range is ( %d / %d )" % (len(self.worker), self.low, self.high))

    def spawn (self, number=1):
        """create the request number of worker processes"""
        self.log.info("spawning %d more workers" % number)
        for _ in range(number):
            self._spawn()

    def respawn (self):
        """make sure we reach the minimum number of workers"""
        # replace every current worker with a fresh one, keeping the
        # pool size clamped to [low, high]
        number = max(min(len(self.worker), self.high), self.low)

        for wid in set(self.worker):
            self.stopWorker(wid)

        self.spawn(number)

    def stopWorker (self, wid):
        self.log.info('want worker %s to go away' % wid)

        # reap immediately if idle, otherwise defer until it finishes
        if wid not in self.active:
            self.reap(wid)
        else:
            self.stopping.add(wid)

    def reap (self, wid):
        """Remove worker *wid* from all bookkeeping and shut it down."""
        self.log.info('we are killing worker %s' % wid)
        worker = self.worker[wid]

        if wid in self.active:
            self.log.error('reaping worker %s even though it is still active' % wid)
            self.active.pop(wid)

        if wid in self.stopping:
            self.stopping.remove(wid)

        if wid in self.available:
            self.available.remove(wid)

        if worker.process is not None:
            self.poller.removeReadSocket('read_workers', worker.process.stdout)
            self.processes.pop(worker.process.stdout)

        worker.shutdown()
        self.worker.pop(wid)

    def _decrease (self):
        # remove one worker, but never drop below the minimum
        if self.low < len(self.worker):
            wid = self._oldest()
            if wid:
                self.stopWorker(wid)

    def _increase (self):
        # add one worker, but never grow past the maximum
        if len(self.worker) < self.high:
            self.spawn()

    def decrease (self, count=1):
        for _ in xrange(count):
#.........这里部分代码省略.........
示例7: ClientManager
# 需要导入模块: from exaproxy.util.log.logger import Logger [as 别名]
# 或者: from exaproxy.util.log.logger.Logger import error [as 别名]
class ClientManager (object):
    """Track proxy client connections, supporting both HTTP and ICAP
    clients with separate buffer limits and proxied-protocol settings.
    (Truncated in this view.)"""

    def __init__(self, poller, configuration):
        self.total_sent4 = 0L      # bytes sent over IPv4 (long: py2 counters)
        self.total_sent6 = 0L      # bytes sent over IPv6
        self.total_requested = 0L  # number of requests read from clients
        # clients that connected but have not yet sent a request;
        # entries expire after http.idle_connect
        self.norequest = TimeCache(configuration.http.idle_connect)
        self.bysock = {}
        self.byname = {}           # client name -> socket
        self.buffered = []
        self._nextid = 0           # monotonically increasing client id
        self.poller = poller
        self.log = Logger('client', configuration.log.client)

        # per-protocol limits and PROXY-protocol flags
        self.http_max_buffer = configuration.http.header_size
        self.icap_max_buffer = configuration.icap.header_size
        self.proxied = {
            'proxy' : configuration.http.proxied,
            'icap'  : configuration.icap.proxied,
        }

    def __contains__(self, item):
        return item in self.bysock

    def lookupSocket (self, item):
        # returns the socket registered for a client name, or None
        return self.byname.get(item, None)

    def getnextid(self):
        # ids are handed out as strings
        self._nextid += 1
        return str(self._nextid)

    def expire (self,number=100):
        """Clean up to *number* idle connections that never sent a request;
        return how many were closed."""
        count = 0
        for sock in self.norequest.expired(number):
            client = self.norequest.get(sock,[None,])[0]
            if client:
                self.cleanup(sock,client.name)
                count += 1
        return count

    def httpConnection (self, sock, peer, source):
        """Register a freshly accepted HTTP connection."""
        name = self.getnextid()
        client = HTTPClient(name, sock, peer, self.log, self.http_max_buffer, self.proxied.get(source))
        self.norequest[sock] = client, source
        self.byname[name] = sock

        # watch for the opening request
        self.poller.addReadSocket('opening_client', client.sock)

        #self.log.info('new id %s (socket %s) in clients : %s' % (name, sock, sock in self.bysock))
        return peer

    def icapConnection (self, sock, peer, source):
        """Register a freshly accepted ICAP connection."""
        name = self.getnextid()
        client = ICAPClient(name, sock, peer, self.log, self.icap_max_buffer, self.proxied.get(source))
        self.norequest[sock] = client, source
        self.byname[name] = sock

        # watch for the opening request
        self.poller.addReadSocket('opening_client', client.sock)

        #self.log.info('new id %s (socket %s) in clients : %s' % (name, sock, sock in self.bysock))
        return peer

    def readRequest (self, sock):
        """Read only the initial HTTP headers sent by the client"""
        client, source = self.norequest.get(sock, (None, None))
        if client:
            name, peer, request, subrequest, content = client.readData()

            if request:
                self.total_requested += 1

                # headers can be read only once
                self.norequest.pop(sock, (None, None))
                # promote the connection to an established client
                self.bysock[sock] = client, source

                # watch for the client sending new data
                self.poller.addReadSocket('read_client', client.sock)

                # we have now read the client's opening request
                self.poller.removeReadSocket('opening_client', client.sock)

                # do not read more data until we have properly handled the request
                self.poller.corkReadSocket('read_client', sock)

            elif request is None:
                # None signals a dead/closed connection
                self.cleanup(sock, client.name)
        else:
            self.log.error('trying to read headers from a client that does not exist %s' % sock)
            name, peer, request, subrequest, content, source = None, None, None, None, None, None

        return name, peer, request, subrequest, content, source

    def readData (self, sock):
        client, source = self.bysock.get(sock, (None, None))
        if client:
#.........这里部分代码省略.........
示例8: len
# 需要导入模块: from exaproxy.util.log.logger import Logger [as 别名]
# 或者: from exaproxy.util.log.logger.Logger import error [as 别名]
for events in res:
fd = events.ident
name, poller, sockets, fdtosock = self.pollers[fd]
events = poller.control(None, self.max_events, 0)
if len(events) == self.max_events:
log.warning("polled max_events from queue %s" % name)
for sock_events in events:
sock_fd = sock_events.ident
try:
response[name].append(fdtosock[sock_fd])
except KeyError:
log.error("KQueue register called before fdtosock registered! Skipping event")
continue
if sock_events.flags & select.KQ_EV_ERROR:
log.warning("%s KQ_EV_ERROR: fd=%d filter=%d fflags=%d flags=%d data=%d udata=%d" % (
str(datetime.datetime.now()),
sock_events.ident, sock_events.filter, sock_events.flags, sock_events.fflags,
sock_events.data, sock_events.udata))
sock = fdtosock.pop(sock_fd, None)
poller.control([kevent(sock, sock_events.filter, KQ_EV_DELETE)], 0)
sockets.pop(sock)
if sock not in self.errors:
self.errors[sock] = name
示例9: Redirector
# 需要导入模块: from exaproxy.util.log.logger import Logger [as 别名]
# 或者: from exaproxy.util.log.logger.Logger import error [as 别名]
class Redirector (Thread):
    """Worker thread wrapping a forked squid-style redirector program,
    classifying requests via either the 'url' or 'icap://' protocol.
    (Class likely continues beyond this view.)"""

    # TODO : if the program is a function, fork and run :)

    def __init__ (self, configuration, name, request_box, program):
        self.configuration = configuration
        self.enabled = configuration.redirector.enable
        self.protocol = configuration.redirector.protocol
        self._transparent = configuration.http.transparent
        self.log = Logger('worker ' + str(name), configuration.log.worker)
        self.usage = UsageLogger('usage', configuration.log.worker)

        # 'url' protocol speaks line-oriented text to the child
        self.universal = True if self.protocol == 'url' else False
        # hostname part of an icap:// protocol URI, '' otherwise
        self.icap = self.protocol[len('icap://'):].split('/')[0] if self.protocol.startswith('icap://') else ''

        r, w = os.pipe() # pipe for communication with the main thread
        # unbuffered (0) so responses are visible immediately (py2 fdopen)
        self.response_box_write = os.fdopen(w,'w',0) # results are written here
        self.response_box_read = os.fdopen(r,'r',0) # read from the main thread

        self.wid = name # a unique name
        self.creation = time.time() # when the thread was created
        # self.last_worked = self.creation # when the thread last picked a task

        self.request_box = request_box # queue with HTTP headers to process
        self.program = program # the squid redirector program to fork
        self.running = True # the thread is active
        self.stats_timestamp = None # time of the most recent outstanding request to generate stats

        self._proxy = 'ExaProxy-%s-id-%d' % (configuration.proxy.version,os.getpid())

        # select the classification strategy from the protocol
        if self.protocol == 'url':
            self.classify = self._classify_url
        if self.protocol.startswith('icap://'):
            self.classify = self._classify_icap

        # Do not move, we need the forking AFTER the setup
        self.process = self._createProcess() # the forked program to handle classification
        Thread.__init__(self)

    def _createProcess (self):
        """Fork the redirector program with piped stdio; return the Popen
        object, or None when disabled or on failure."""
        if not self.enabled:
            return

        def preexec(): # Don't forward signals.
            os.setpgrp()

        try:
            process = subprocess.Popen([self.program,],
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                universal_newlines=self.universal,
                preexec_fn=preexec,
            )
            self.log.debug('spawn process %s' % self.program)
        except KeyboardInterrupt:
            process = None
        except (subprocess.CalledProcessError,OSError,ValueError):
            self.log.error('could not spawn process %s' % self.program)
            process = None

        if process:
            # stderr is polled opportunistically, so it must not block
            try:
                fcntl.fcntl(process.stderr, fcntl.F_SETFL, os.O_NONBLOCK)
            except IOError:
                self.destroyProcess()
                process = None

        return process

    def destroyProcess (self):
        """Terminate and reap the forked redirector, if any."""
        if not self.enabled:
            return
        self.log.debug('destroying process %s' % self.program)
        if not self.process:
            return
        try:
            if self.process:
                self.process.terminate()
                self.process.wait()
                self.log.info('terminated process PID %s' % self.process.pid)
        except OSError, e:
            # ESRCH: no such process — already gone, nothing to report
            if e[0] != errno.ESRCH:
                self.log.error('PID %s died' % self.process.pid)