This article collects typical usage examples of logging.config in Python. If you are wondering how exactly to use logging.config, how it works in practice, or what real-world code that uses it looks like, the curated examples below may help. You can also explore further usage examples of the logging package it belongs to.
A total of 15 code examples involving logging.config are shown below, sorted by popularity by default.
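For context, logging.config is the standard-library submodule that configures Python's logging machinery, most commonly from a dictionary (dictConfig) or an INI-style file (fileConfig). A minimal sketch of the dictionary form follows; the handler and formatter names are illustrative and not taken from the examples below.

import logging
import logging.config

# One console handler attached to the root logger; names are illustrative.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "simple": {"format": "%(asctime)s %(levelname)s %(name)s: %(message)s"},
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "formatter": "simple",
            "level": "INFO",
        },
    },
    "root": {"handlers": ["console"], "level": "INFO"},
}

logging.config.dictConfig(LOGGING)
logging.getLogger(__name__).info("logging configured via dictConfig")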
Example 1: got_config_request
# Required module: import logging [as alias]
# Or: from logging import config [as alias]
def got_config_request(self, ch, method, properties, body):
    ip_addresses = json.loads(body)
    logging.info("Got config request with following IP addresses: %s" % ip_addresses)
    pi_id = None
    worker = db.session.query(db.objects.Worker).filter(db.objects.Worker.address.in_(ip_addresses)).first()
    if worker:
        pi_id = worker.id
        logging.debug("Found worker id %s for IP address %s" % (pi_id, worker.address))
    else:  # wasn't able to find worker with given ip address(es)
        logging.error("Wasn't able to find worker for given IP address(es)")
        reply_properties = pika.BasicProperties(correlation_id=properties.correlation_id)
        self.channel.basic_publish(exchange=utils.EXCHANGE, properties=reply_properties, routing_key=properties.reply_to, body="")
        return
    config = self.prepare_config(pi_id)
    logging.info("Sending initial config to worker with id %s" % pi_id)
    reply_properties = pika.BasicProperties(correlation_id=properties.correlation_id, content_type='application/json')
    self.channel.basic_publish(exchange=utils.EXCHANGE, properties=reply_properties, routing_key=properties.reply_to, body=json.dumps(config))

# callback method for when the manager receives data after a worker executed its actions
Example 2: got_on_off
# Required module: import logging [as alias]
# Or: from logging import config [as alias]
def got_on_off(self, ch, method, properties, body):
    msg = json.loads(body)
    self.cleanup_notifiers()
    if(msg['active_state'] == True):
        self.setup_notifiers()
        logging.info("Activating setup: %s" % msg['setup_name'])
    workers = db.session.query(db.objects.Worker).filter(db.objects.Worker.active_state == True).all()
    for pi in workers:
        config = self.prepare_config(pi.id)
        # check if we are deactivating --> worker should be deactivated!
        if(msg['active_state'] == False):
            config["active"] = False
            logging.info("Deactivating setup: %s" % msg['setup_name'])
        self.send_json_message(utils.QUEUE_CONFIG + str(pi.id), config)
        logging.info("Sent config to worker %s" % pi.name)

# callback method which gets called when a worker raises an alarm
Example 3: got_init_config
# Required module: import logging [as alias]
# Or: from logging import config [as alias]
def got_init_config(self, ch, method, properties, body):
    logging.info("Received initial config %r" % (body))
    if self.corr_id == properties.correlation_id:  # we got the right config
        try:  # TODO: add check if response is empty...
            new_conf = json.loads(body)
            new_conf["rabbitmq"] = config.get("rabbitmq")
        except Exception as e:
            logging.exception("Wasn't able to read JSON config from manager:\n%s" % e)
            time.sleep(60)  # sleep for X seconds and then ask again
            self.fetch_init_config()
            return
        logging.info("Trying to apply config and reconnect")
        self.apply_config(new_conf)
        self.connection_cleanup()
        self.connect()  # hope this is the right spot
        logging.info("Initial config activated")
        self.start()
    else:
        logging.info("This config isn't meant for us")

# Create a zip of all the files which were collected while actions were executed
Example 4: configure_properties
# Required module: import logging [as alias]
# Or: from logging import config [as alias]
def configure_properties(self, tc_config_prop_filenames=None, behave_properties=None):
    """Configure selenium instance properties

    :param tc_config_prop_filenames: test case specific properties filenames
    :param behave_properties: dict with behave user data properties
    """
    prop_filenames = DriverWrappersPool.get_configured_value('Config_prop_filenames', tc_config_prop_filenames,
                                                             'properties.cfg;local-properties.cfg')
    prop_filenames = [os.path.join(DriverWrappersPool.config_directory, filename) for filename in
                      prop_filenames.split(';')]
    prop_filenames = ';'.join(prop_filenames)

    # Configure config only if properties filename has changed
    if self.config_properties_filenames != prop_filenames:
        # Initialize the config object
        self.config = ExtendedConfigParser.get_config_from_file(prop_filenames)
        self.config_properties_filenames = prop_filenames
        self.update_magic_config_names()

    # Override properties with system properties
    self.config.update_properties(os.environ)

    # Override properties with behave userdata properties
    if behave_properties:
        self.config.update_properties(behave_properties)
Example 5: get_config_window_bounds
# Required module: import logging [as alias]
# Or: from logging import config [as alias]
def get_config_window_bounds(self):
    """Read bounds from config and, if a monitor is specified, modify the values to match that monitor

    :return: x and y coordinates where the browser window should be placed
    """
    bounds_x = int(self.config.get_optional('Driver', 'bounds_x') or 0)
    bounds_y = int(self.config.get_optional('Driver', 'bounds_y') or 0)

    monitor_index = int(self.config.get_optional('Driver', 'monitor') or -1)
    if monitor_index > -1:
        try:
            monitor = screeninfo.get_monitors()[monitor_index]
            bounds_x += monitor.x
            bounds_y += monitor.y
        except NotImplementedError:
            self.logger.warn('Current environment doesn\'t support get_monitors')

    return bounds_x, bounds_y
Example 6: define_atlas_settings
# Required module: import logging [as alias]
# Or: from logging import config [as alias]
def define_atlas_settings():
    atlas_settings = {
        'DKT': {
            'path': os.path.join(ciftify.config.find_HCP_S1200_GroupAvg(),
                                 'cvs_avg35_inMNI152.aparc.32k_fs_LR.dlabel.nii'),
            'order': 1,
            'name': 'DKT',
            'map_number': 1
        },
        'MMP': {
            'path': os.path.join(ciftify.config.find_HCP_S1200_GroupAvg(),
                                 'Q1-Q6_RelatedValidation210.CorticalAreas_dil_Final_Final_Areas_Group_Colors.32k_fs_LR.dlabel.nii'),
            'order': 3,
            'name': 'MMP',
            'map_number': 1
        },
        'Yeo7': {
            'path': os.path.join(ciftify.config.find_HCP_S1200_GroupAvg(),
                                 'RSN-networks.32k_fs_LR.dlabel.nii'),
            'order': 2,
            'name': 'Yeo7',
            'map_number': 1
        }
    }
    return(atlas_settings)
Example 7: generate_qc_page
# Required module: import logging [as alias]
# Or: from logging import config [as alias]
def generate_qc_page(user_settings, config, qc_dir, scene_dir, qc_html,
                     temp_dir, title_formatter):
    sbref_nii = change_sbref_palette(user_settings, temp_dir)
    dtseries_sm = get_smoothed_dtseries_file(user_settings, temp_dir)
    contents = config.get_template_contents()
    scene_file = personalize_template(contents, scene_dir, user_settings,
                                      sbref_nii, dtseries_sm)
    ciftify.utils.make_dir(qc_dir)
    with open(qc_html, 'w') as qc_page:
        ciftify.html.add_page_header(qc_page, config, user_settings.qc_mode,
                                     subject=user_settings.subject, path='..')
        wb_logging = 'INFO' if user_settings.debug_mode else 'WARNING'
        ciftify.html.add_images(qc_page, qc_dir, config.images,
                                scene_file, wb_logging=wb_logging, add_titles=True,
                                title_formatter=title_formatter)
Example 8: run_migrations_offline
# Required module: import logging [as alias]
# Or: from logging import config [as alias]
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    url = config.get_main_option("sqlalchemy.url")
    context.configure(url=url)

    with context.begin_transaction():
        context.run_migrations()
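This run_migrations_offline function follows Alembic's standard env.py template (Example 11 below is a variant of the same template); the config object it reads is Alembic's Config wrapper around alembic.ini. The reason such snippets appear under logging.config is that the stock env.py passes that same .ini file to logging.config.fileConfig to set up logging. A sketch of the typical preamble, assuming the stock Alembic template (target_metadata and the .ini contents are project-specific):

from logging.config import fileConfig

from alembic import context

# Alembic Config object, which provides access to the values in alembic.ini.
config = context.config

# Interpret the .ini file for Python logging: this reads the [loggers],
# [handlers], and [formatters] sections of alembic.ini.
if config.config_file_name is not None:
    fileConfig(config.config_file_name)

# Model metadata used for 'autogenerate' support; None if not wired up.
target_metadata = None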
Example 9: main
# Required module: import logging [as alias]
# Or: from logging import config [as alias]
def main(argv):
    parsed_args = parser.parse_args(argv[1:])
    mod, klass = parsed_args.transport.rsplit('.', 1)
    transport = getattr(importlib.import_module(mod), klass)()
    command = parsed_args.command
    if not command:
        logging.config.dictConfig(LOGGING)
        transport.run_server(
            pool_size=parsed_args.pool_size,
            max_accepts=parsed_args.max_accepts,
            max_child_tasks=parsed_args.max_child_tasks
        )
    else:
        stdout, stderr, returncode = transport.run_cmd(' '.join(command))
        sys.stdout.write(stdout)
        sys.stderr.write(stderr)
        sys.exit(returncode)
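The LOGGING dictionary passed to logging.config.dictConfig above is not shown in the snippet. A hypothetical dictionary following the dictConfig schema could look like the sketch below; the logger name and levels are illustrative, not the project's actual configuration.

# Hypothetical LOGGING dict for the server branch above: log to stderr and
# keep everything outside the application logger at WARNING.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "handlers": {
        "stderr": {"class": "logging.StreamHandler", "level": "DEBUG"},
    },
    "loggers": {
        # assumed application logger name, for illustration only
        "transport": {"handlers": ["stderr"], "level": "DEBUG", "propagate": False},
    },
    "root": {"handlers": ["stderr"], "level": "WARNING"},
}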
Example 10: update_config
# Required module: import logging [as alias]
# Or: from logging import config [as alias]
def update_config(update):
    """
    Update (recursively) the current logging configuration dictionary.

    See: `Logging config dictionary schema <https://docs.python.org/3/library/logging.config.html#logging-config-dictschema>`_
    """
    global _config
    new_config = copy.deepcopy(_config)
    _update_dict_recursive(new_config, update)
    logging.config.dictConfig(new_config)
    _configure_ulog_bridge()
    _config = new_config

# set the default log configuration on import
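Here _config, _update_dict_recursive, and _configure_ulog_bridge are module internals not shown in the snippet. Assuming _config follows the dictConfig schema and the helper deep-merges dictionaries, a hypothetical call like the following would quiet a chatty logger without rebuilding the whole configuration (the logger name is chosen purely for illustration):

# Merge a partial dict into the stored configuration and re-apply it via dictConfig.
update_config({
    "loggers": {
        "urllib3": {"level": "WARNING"},
    },
})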
Example 11: run_migrations_offline
# Required module: import logging [as alias]
# Or: from logging import config [as alias]
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url, target_metadata=target_metadata, literal_binds=True
    )

    with context.begin_transaction():
        context.run_migrations()
Example 12: _load_connectors
# Required module: import logging [as alias]
# Or: from logging import config [as alias]
def _load_connectors(self, main_config):
    self.connectors_configs = {}
    if main_config.get("connectors"):
        for connector in main_config['connectors']:
            try:
                connector_class = TBUtility.check_and_import(connector["type"], self._default_connectors.get(connector["type"], connector.get("class")))
                self._implemented_connectors[connector["type"]] = connector_class
                with open(self._config_dir + connector['configuration'], 'r', encoding="UTF-8") as conf_file:
                    connector_conf = load(conf_file)
                    if not self.connectors_configs.get(connector['type']):
                        self.connectors_configs[connector['type']] = []
                    connector_conf["name"] = connector["name"]
                    self.connectors_configs[connector['type']].append({"name": connector["name"], "config": {connector['configuration']: connector_conf}})
            except Exception as e:
                log.error("Error on loading connector:")
                log.exception(e)
    else:
        log.error("Connectors - not found! Check your configuration!")
        main_config["remoteConfiguration"] = True
        log.info("Remote configuration is enabled forcibly!")
Example 13: _connect_with_connectors
# Required module: import logging [as alias]
# Or: from logging import config [as alias]
def _connect_with_connectors(self):
    for connector_type in self.connectors_configs:
        for connector_config in self.connectors_configs[connector_type]:
            for config in connector_config["config"]:
                connector = None
                try:
                    if connector_config["config"][config] is not None:
                        connector = self._implemented_connectors[connector_type](self, connector_config["config"][config],
                                                                                 connector_type)
                        connector.setName(connector_config["name"])
                        self.available_connectors[connector.get_name()] = connector
                        connector.open()
                    else:
                        log.info("Config not found for %s", connector_type)
                except Exception as e:
                    log.exception(e)
                    if connector is not None:
                        connector.close()
Example 14: _add_monitor_to_experiment
# Required module: import logging [as alias]
# Or: from logging import config [as alias]
def _add_monitor_to_experiment(experiment: experiment.Experiment) -> experiment.Experiment:
    logger.info(f"configured training hooks: {experiment.train_spec.hooks}")

    training_hooks = list(experiment.train_spec.hooks)

    if experiment.config.log_step_count_steps is not None:
        steps_per_second_hook = metrics.StepPerSecondHook(
            every_n_steps=experiment.config.log_step_count_steps
        )
        if not _hook_name_already_exists(steps_per_second_hook, training_hooks):
            training_hooks.append(steps_per_second_hook)
        else:
            logger.warning("do not add StepPerSecondHook as there is already one configured")

    monitored_train_spec = experiment.train_spec._replace(
        hooks=training_hooks
    )

    monitored_eval_spec = experiment.eval_spec._replace(
        hooks=(evaluator_metrics.EvalMonitorHook(), *experiment.eval_spec.hooks)
    )

    experiment = experiment._replace(eval_spec=monitored_eval_spec, train_spec=monitored_train_spec)
    return experiment
Example 15: __init__
# Required module: import logging [as alias]
# Or: from logging import config [as alias]
def __init__(self, args, config):
    self.args = args
    self.config = config
    self.cache_dir = utils.get_cache_dir(config)
    self.model_dir = utils.get_model_dir(config)
    self.category = utils.get_category(config, self.cache_dir if os.path.exists(self.cache_dir) else None)
    self.draw_bbox = utils.visualize.DrawBBox(self.category, colors=args.colors, thickness=args.thickness)
    self.anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
    self.height, self.width = tuple(map(int, config.get('image', 'size').split()))
    self.path, self.step, self.epoch = utils.train.load_model(self.model_dir)
    state_dict = torch.load(self.path, map_location=lambda storage, loc: storage)
    self.dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(config, state_dict), self.anchors, len(self.category))
    self.dnn.load_state_dict(state_dict)
    self.inference = model.Inference(config, self.dnn, self.anchors)
    self.inference.eval()
    if torch.cuda.is_available():
        self.inference.cuda()
    logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in self.inference.state_dict().values())))
    self.cap = self.create_cap()
    self.keys = set(args.keys)
    self.resize = transform.parse_transform(config, config.get('transform', 'resize_test'))
    self.transform_image = transform.get_transform(config, config.get('transform', 'image_test').split())
    self.transform_tensor = transform.get_transform(config, config.get('transform', 'tensor').split())