本文整理汇总了Python中logbook.FileHandler.push_application方法的典型用法代码示例。如果您正苦于以下问题:Python FileHandler.push_application方法的具体用法?Python FileHandler.push_application怎么用?Python FileHandler.push_application使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类logbook.FileHandler的用法示例。
在下文中一共展示了FileHandler.push_application方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: from logbook import FileHandler [as 别名]
# 或者: from logbook.FileHandler import push_application [as 别名]
def main():
    """Run test4 with all warnings escalated to errors, logging to application.log."""
    # Turn warnings into exceptions so problems surface immediately.
    # (The blanket Warning filter also covers UserWarning.)
    warnings.simplefilter('error', UserWarning)
    warnings.simplefilter('error', Warning)
    from logbook import FileHandler, StreamHandler
    # Make this file handler the application-wide logbook handler.
    log_handler1 = FileHandler('application.log')
    log_handler1.push_application()
    test4()
    pylab.show()
示例2: setup_logging
# 需要导入模块: from logbook import FileHandler [as 别名]
# 或者: from logbook.FileHandler import push_application [as 别名]
def setup_logging(config):
    """Install a file-based logbook handler and return the application logger.

    In debug mode a plain FileHandler is used (no rotation, so the log is a
    single easy-to-follow file); otherwise a RotatingFileHandler sized from
    the daemon configuration is installed.
    """
    daemon_cfg = config['daemon']
    log_file = os.path.join(daemon_cfg['app_path'], daemon_cfg['log']['file'])
    # if running in debug mode, disable log rotation because it makes
    # things confusing
    if daemon_cfg['debug']:
        log_handler = FileHandler(log_file)
    else:
        log_handler = RotatingFileHandler(
            log_file,
            max_size=daemon_cfg['log']['rotate_size'],
            backup_count=daemon_cfg['log']['rotate_count'])
    log_handler.push_application()
    return Logger('edgy_crits')
示例3: Logger
# 需要导入模块: from logbook import FileHandler [as 别名]
# 或者: from logbook.FileHandler import push_application [as 别名]
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, nested_scopes
import os
import logbook
from logbook import FileHandler
from logbook import Logger

# Module-level logger shared by everything that imports this module.
log = Logger('scraper')
# Create a logs directory if it does not exist yet.
if not os.path.exists('logs'):
    os.makedirs('logs')
# Send every DEBUG-and-above record to logs/app.log, application-wide.
file_handler = FileHandler('logs/app.log', level=logbook.DEBUG)
file_handler.push_application()
示例4: Logger
# 需要导入模块: from logbook import FileHandler [as 别名]
# 或者: from logbook.FileHandler import push_application [as 别名]
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
import shutil
from uuid import uuid4
from time import strftime
import psutil
from types import NoneType  # Python 2 only: NoneType was removed from `types` in Python 3
from logbook import FileHandler, Logger, CRITICAL

log = Logger("Browser")
# Attach a file handler only when application.log already exists; otherwise
# raise the logger level so nothing below CRITICAL is emitted.
# NOTE(review): guarding on the file's existence (instead of creating it)
# is unusual -- confirm this opt-in-by-touching-the-file behavior is intended.
if os.path.exists("application.log"):
    log_handler = FileHandler('application.log')
    log_handler.push_application()
else:
    log.level = CRITICAL
class Browser(object):
    def _expand(self, path):
        """Expand ``~`` (POSIX) or environment variables (Windows) in *path*.

        Returns the expanded path, or None when the OS is unsupported
        (including Windows XP, where expandvars is not attempted).
        """
        log.debug("Expanding: {}".format(path))
        if self.os in ("linux", "darwin"):
            return os.path.expanduser(path)
        if self.os == "windows" and platform.release() != "XP":
            return os.path.expandvars(path)
        log.error("Unsupported OS: {} - expanding failed.".format(self.os))
        return None
示例5: Qdb
# 需要导入模块: from logbook import FileHandler [as 别名]
# 或者: from logbook.FileHandler import push_application [as 别名]
class Qdb(Bdb, object):
"""
The Quantopian Remote Debugger.
"""
_instance = None
def __new__(cls, *args, **kwargs):
    """
    Qdb objects are singletons that persist until their disable method is
    called.
    """
    # Lazily create the one shared instance. __init__ below is a no-op,
    # so real initialization happens exactly once here via _init().
    if not cls._instance:
        cls._instance = super(Qdb, cls).__new__(cls)
        cls._instance._init(*args, **kwargs)
    return cls._instance
def __init__(self, *args, **kwargs):
    # Intentionally empty: __new__ performs the one-time setup via _init(),
    # and Python would otherwise re-run __init__ on every Qdb(...) call.
    pass
def _init(self, config=None, merge=False, **kwargs):
    """
    See qdb.config for more information about the configuration of
    qdb.
    merge denotes how config and kwargs should be merged.
    QdbConfig.kwargs_first says config will trample kwargs,
    QdbConfig.config_first says kwargs will trample config.
    Otherwise, kwargs and config cannot both be passed.
    """
    self.super_ = super(Qdb, self)
    self.super_.__init__()
    self.reset()
    # Resolve the effective configuration from config and/or kwargs.
    if config and kwargs:
        if merge == QdbConfig.kwargs_first:
            first = kwargs
            second = config
        elif merge == QdbConfig.config_first:
            first = config
            second = kwargs
        else:
            raise TypeError('Cannot pass config and kwargs')
        config = first.merge(second)
    else:
        config = QdbConfig.get_config(config or kwargs)
    self.address = config.host, config.port
    self.set_default_file(config.default_file)
    self.default_namespace = config.default_namespace or {}
    self.exception_serializer = config.exception_serializer or \
        default_exception_serializer
    self.eval_fn = config.eval_fn or default_eval_fn
    self.green = config.green
    self._file_cache = {}
    self.redirect_output = config.redirect_output
    # NOTE(review): 'retry_attepts' typo mirrors the QdbConfig attribute
    # name, so it cannot be renamed here without changing the config class.
    self.retry_attepts = config.retry_attepts
    self.repr_fn = config.repr_fn
    # Default skip filter accepts everything.
    self._skip_fn = config.skip_fn or (lambda _: False)
    self.pause_signal = config.pause_signal \
        if config.pause_signal else signal.SIGUSR2
    self.uuid = str(config.uuid or uuid4())
    self.watchlist = {}
    self.execution_timeout = config.execution_timeout
    self.reset()
    # Optionally route this debugger's log records to a file.
    self.log_handler = None
    if config.log_file:
        self.log_handler = FileHandler(config.log_file)
        self.log_handler.push_application()
    # The timing between these lines might matter depending on the
    # cmd_manager. Don't separate them.
    self.cmd_manager = (config.cmd_manager or RemoteCommandManager)(self)
    self.cmd_manager.start(config.auth_msg)
    # We need to be able to send stdout back to the user debugging the
    # program. We hold a handle to this in case the program resets stdout.
    if self.redirect_output:
        self._old_stdout = sys.stdout
        self._old_stderr = sys.stderr
        sys.stdout = OutputTee(
            sys.stdout,
            RemoteOutput(self.cmd_manager, '<stdout>'),
        )
        sys.stderr = OutputTee(
            sys.stderr,
            RemoteOutput(self.cmd_manager, '<stderr>'),
        )
def skip_fn(self, path):
    # Normalize the path (Bdb.canonic) before applying the user's skip filter.
    return self._skip_fn(self.canonic(path))
def restore_output_streams(self):
    """
    Restores the original output streams.
    """
    # Nothing to undo unless we installed the tees in _init.
    if not self.redirect_output:
        return
    sys.stdout = self._old_stdout
    sys.stderr = self._old_stderr
def _new_execution_timeout(self, src):
"""
Return a new execution timeout context manager.
#.........这里部分代码省略.........
示例6: set_req
# 需要导入模块: from logbook import FileHandler [as 别名]
# 或者: from logbook.FileHandler import push_application [as 别名]
for k,v in headers.iteritems():
self.req.add_header(k,v)
def set_req(self):
    # Build a POST request: passing url-encoded self.data makes urllib2
    # send it as the request body.
    self.req = urllib2.Request(self.url, urllib.urlencode(self.data))
def send(self):
    # Rebuild the request from current url/data, then POST it and return
    # the response object from urlopen.
    self.set_req()
    return urllib2.urlopen(self.req)
if __name__ == "__main__":
    logger = Logger("TicketchangeToInfluxdb")
    logfile = "ticketchangetoinfluxdb.log"
    # Open the log file in append mode and make this handler the
    # application-wide logbook handler. (A stray fh.applicationbound()
    # call was removed: it merely returns a context manager and has no
    # effect unless used in a `with` block.)
    fh = FileHandler(logfile, "a")
    fh.push_application()
    client = Client()
    client.test()
    adapter = Adapter()
    client.set_adapter(adapter)
    a = client.get_adapter()
    a.test()
    print("This is just a test.")
    logger.info("Testing logging.")
示例7: start_logging
# 需要导入模块: from logbook import FileHandler [as 别名]
# 或者: from logbook.FileHandler import push_application [as 别名]
def start_logging(filename):
    """Push a bubbling logbook FileHandler writing to /var/log/<name>.log.

    The log name is the given file's basename with its extension removed
    (via the project helper remove_extn). bubble=True lets records continue
    to any other installed handlers.
    """
    base_name = remove_extn(os.path.basename(filename))
    handler = FileHandler('/var/log/' + base_name + '.log', bubble=True)
    handler.push_application()
示例8: main
# 需要导入模块: from logbook import FileHandler [as 别名]
# 或者: from logbook.FileHandler import push_application [as 别名]
def main(user_email,
url_api_collection,
log_handler=None,
mail_handler=None,
dir_profile='profiles',
profile_path=None,
config_file=None,
**kwargs):
'''Executes a harvest with given parameters.
Returns the ingest_doc_id, directory harvest saved to and number of
records.
'''
if not config_file:
config_file = os.environ.get('DPLA_CONFIG_FILE', 'akara.ini')
num_recs = -1
my_mail_handler = None
if not mail_handler:
my_mail_handler = logbook.MailHandler(
EMAIL_RETURN_ADDRESS, user_email, level='ERROR', bubble=True)
my_mail_handler.push_application()
mail_handler = my_mail_handler
try:
collection = Collection(url_api_collection)
except Exception as e:
msg = 'Exception in Collection {}, init {}'.format(url_api_collection,
str(e))
logbook.error(msg)
raise e
if not (collection['harvest_type'] in HARVEST_TYPES):
msg = 'Collection {} wrong type {} for harvesting. Harvest type {} \
is not in {}'.format(url_api_collection,
collection['harvest_type'],
collection['harvest_type'],
HARVEST_TYPES.keys())
logbook.error(msg)
raise ValueError(msg)
mail_handler.subject = "Error during harvest of " + collection.url
my_log_handler = None
if not log_handler: # can't init until have collection
my_log_handler = FileHandler(get_log_file_path(collection.slug))
my_log_handler.push_application()
logger = logbook.Logger('HarvestMain')
msg = 'Init harvester next. Collection:{}'.format(collection.url)
logger.info(msg)
# email directly
mimetext = create_mimetext_msg(EMAIL_RETURN_ADDRESS, user_email, ' '.join(
('Starting harvest for ', collection.slug)), msg)
try: # TODO: request more emails from AWS
mail_handler.deliver(mimetext, '[email protected]')
except:
pass
logger.info('Create DPLA profile document')
if not profile_path:
profile_path = os.path.abspath(
os.path.join(dir_profile, collection.id + '.pjs'))
with codecs.open(profile_path, 'w', 'utf8') as pfoo:
pfoo.write(collection.dpla_profile)
logger.info('DPLA profile document : ' + profile_path)
harvester = None
try:
harvester = HarvestController(
user_email,
collection,
profile_path=profile_path,
config_file=config_file,
**kwargs)
except Exception as e:
import traceback
msg = 'Exception in harvester init: type: {} TRACE:\n{}'.format(
type(e), traceback.format_exc())
logger.error(msg)
raise e
logger.info('Create ingest doc in couch')
ingest_doc_id = harvester.create_ingest_doc()
logger.info('Ingest DOC ID: ' + ingest_doc_id)
logger.info('Start harvesting next')
try:
num_recs = harvester.harvest()
msg = ''.join(('Finished harvest of ', collection.slug, '. ',
str(num_recs), ' records harvested.'))
harvester.update_ingest_doc('complete', items=num_recs, num_coll=1)
logger.info(msg)
# email directly
mimetext = create_mimetext_msg(
EMAIL_RETURN_ADDRESS, user_email, ' '.join(
('Finished harvest of raw records '
'for ', collection.slug, ' enriching next')), msg)
try:
mail_handler.deliver(mimetext, '[email protected]')
except:
pass
except Exception as e:
import traceback
error_msg = ''.join(("Error while harvesting: type-> ", str(type(e)),
" TRACE:\n" + str(traceback.format_exc())))
logger.error(error_msg)
harvester.update_ingest_doc(
'error', error_msg=error_msg, items=num_recs)
raise e
if my_log_handler:
#.........这里部分代码省略.........