本文整理汇总了Python中logbook.WARNING属性的典型用法代码示例。如果您正苦于以下问题:Python logbook.WARNING属性的具体用法?Python logbook.WARNING怎么用?Python logbook.WARNING使用的例子?那么,这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在模块logbook的用法示例。
在下文中一共展示了logbook.WARNING属性的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: import logbook [as 别名]
# 或者: from logbook import WARNING [as 别名]
def __init__(
        self,
        filename=None,
        parsing_params=None,
        sampling_params=None,
        name=None,
        data_names=('default_asset',),
        task=0,
        log_level=WARNING,
        _config_stack=None,
):
    """
    Episode data container: forwards its configuration to the parent class.

    Args:
        filename:        data source file name, forwarded to the parent.
        parsing_params:  parsing options, forwarded to the parent.
        sampling_params: accepted but NOT forwarded -- the parent is always
            given ``sampling_params=None``.
            NOTE(review): looks intentional (an episode is not sampled
            from), but confirm against the parent class.
        name:            accepted but NOT forwarded -- the parent always
            receives the literal name 'episode'.
        data_names:      iterable of asset names, forwarded to the parent.
        task:            parent worker id, forwarded to the parent.
        log_level:       int, logbook level; default: WARNING.
        _config_stack:   internal configuration stack, forwarded as-is.
    """
    super(BTgymEpisode, self).__init__(
        filename=filename,
        parsing_params=parsing_params,
        sampling_params=None,  # deliberately dropped, see docstring
        name='episode',        # fixed name; the `name` argument is unused
        task=task,
        data_names=data_names,
        log_level=log_level,
        _config_stack=_config_stack
    )
示例2: __get_logbook_logging_level
# 需要导入模块: import logbook [as 别名]
# 或者: from logbook import WARNING [as 别名]
def __get_logbook_logging_level(level_str):
    """
    Translate a (case-insensitive) level name into the matching logbook
    numeric level.

    Args:
        level_str: level name such as 'warning' or ' DEBUG ' (surrounding
            whitespace and letter case are ignored).

    Returns:
        The corresponding logbook level constant.

    Raises:
        ValueError: if the name does not match any logbook level.
    """
    # logbook numeric levels (deliberately distinct from stdlib logging):
    # CRITICAL=15, ERROR=14, WARNING=13, NOTICE=12, INFO=11,
    # DEBUG=10, TRACE=9, NOTSET=0
    level_str = level_str.upper().strip()
    # Dispatch table instead of an 8-branch if/elif chain.
    levels = {
        'CRITICAL': logbook.CRITICAL,
        'ERROR': logbook.ERROR,
        'WARNING': logbook.WARNING,
        'NOTICE': logbook.NOTICE,
        'INFO': logbook.INFO,
        'DEBUG': logbook.DEBUG,
        'TRACE': logbook.TRACE,
        'NOTSET': logbook.NOTSET,
    }
    if level_str not in levels:
        raise ValueError("Unknown logbook log level: {}".format(level_str))
    return levels[level_str]
示例3: __init__
# 需要导入模块: import logbook [as 别名]
# 或者: from logbook import WARNING [as 别名]
def __init__(self, history_size, max_sample_size, priority_sample_size, log_level=WARNING,
             rollout_provider=None, task=-1, reward_threshold=0.1, use_priority_sampling=False):
    """
    Replay memory holding a bounded history of experiences.

    Args:
        history_size: number of experiences stored;
        max_sample_size: maximum allowed sample size (e.g. off-policy rollout length);
        priority_sample_size: sample size of priority_sample() method;
        log_level: int, logbook.level;
        rollout_provider: callable returning list of Rollouts NOT USED;
        task: parent worker id;
        reward_threshold: if |experience.reward| > reward_threshold: experience is saved as 'prioritized';
        use_priority_sampling: bool; when True, sample_priority() is bound to
            self._sample_priority, otherwise to self._sample_dummy.
    """
    self._history_size = history_size
    # Bounded buffer: oldest experiences are evicted once maxlen is reached.
    self._frames = deque(maxlen=history_size)
    self.reward_threshold = reward_threshold
    self.max_sample_size = int(max_sample_size)
    self.priority_sample_size = int(priority_sample_size)
    self.rollout_provider = rollout_provider
    self.task = task
    self.log_level = log_level
    # Route this process's log records to stdout.
    StreamHandler(sys.stdout).push_application()
    self.log = Logger('ReplayMemory_{}'.format(self.task), level=self.log_level)
    self.use_priority_sampling = use_priority_sampling
    # Indices for non-priority frames:
    self._zero_reward_indices = deque()
    # Indices for priority frames:
    self._non_zero_reward_indices = deque()
    self._top_frame_index = 0
    # Choose the sampling strategy once, at construction time:
    if use_priority_sampling:
        self.sample_priority = self._sample_priority
    else:
        self.sample_priority = self._sample_dummy
示例4: enable_logging
# 需要导入模块: import logbook [as 别名]
# 或者: from logbook import WARNING [as 别名]
def enable_logging(
        level: Optional[LogbookLevel] = None,
        redirect_loggers: Optional[Mapping[str, LogbookLevel]] = None,
) -> None:
    """
    Enable logging for the *saltyrtc* logger group.

    Arguments:
        - `level`: A :mod:`logbook` logging level. Defaults to
          ``WARNING``.
        - `redirect_loggers`: A dictionary containing :mod:`logging`
          logger names as key and the targeted :mod:`logbook` logging
          level as value. Each logger will be looked up and redirected
          to :mod:`logbook`. Defaults to an empty dictionary.

    Raises :class:`ImportError` in case :mod:`logbook` is not
    installed.
    """
    # Delegate the "logbook missing" case to the shared error helper.
    if _logger_convert_level_handler is None:
        _logging_error()
    # Past this point logbook is available (or the error path handled it).
    effective_level = logbook.WARNING if level is None else level
    logger_group.disabled = False
    logger_group.level = effective_level
    if redirect_loggers is not None:
        _redirect_logging_loggers(redirect_loggers, remove=False)
示例5: _get_logging_level
# 需要导入模块: import logbook [as 别名]
# 或者: from logbook import WARNING [as 别名]
def _get_logging_level(verbosity: int) -> LogbookLevel:
    """Map a verbosity count (1 = quietest ... 7 = loudest) to a logbook level.

    Raises KeyError for any verbosity outside the 1-7 range.
    """
    import logbook
    # One-to-one table from CLI verbosity to logbook's severity ladder.
    verbosity_to_level = {
        1: logbook.CRITICAL,
        2: logbook.ERROR,
        3: logbook.WARNING,
        4: logbook.NOTICE,
        5: logbook.INFO,
        6: logbook.DEBUG,
        7: logbook.TRACE,
    }
    return LogbookLevel(verbosity_to_level[verbosity])
示例6: parse_log_level
# 需要导入模块: import logbook [as 别名]
# 或者: from logbook import WARNING [as 别名]
def parse_log_level(value):
    # type: (str) -> int
    """Parse a case-insensitive log-level name.

    Args:
        value: one of 'info', 'warning', 'error', 'debug' (any case).

    Returns:
        The matching logbook level; any unrecognized name falls back to
        logbook.WARNING.
    """
    # Single lookup table replaces the if/elif chain; the default argument
    # of .get() expresses the WARNING fallback exactly once.
    levels = {
        "info": logbook.INFO,
        "warning": logbook.WARNING,
        "error": logbook.ERROR,
        "debug": logbook.DEBUG,
    }
    return levels.get(value.lower(), logbook.WARNING)
示例7: set_level
# 需要导入模块: import logbook [as 别名]
# 或者: from logbook import WARNING [as 别名]
def set_level(self, log_level):
    """Set this logger's level from a case-insensitive level name.

    Unknown level names leave the logger untouched.
    """
    # Map the project's LogLevel names onto logbook's numeric levels.
    name_to_level = {
        LogLevel.INFO: logbook.INFO,
        LogLevel.WARNING: logbook.WARNING,
        LogLevel.CRITICAL: logbook.CRITICAL,
        LogLevel.NOTSET: logbook.NOTSET,
    }
    normalized = log_level.lower()
    if normalized in name_to_level:
        self.logger.level = name_to_level[normalized]
示例8: enable_logging
# 需要导入模块: import logbook [as 别名]
# 或者: from logbook import WARNING [as 别名]
def enable_logging(level=logbook.WARNING, asyncio_level=None, aiohttp_level=None):
    """Enable the logbook logger group and redirect asyncio/aiohttp logging.

    Args:
        level: logbook level (name or numeric) used as the base level.
        asyncio_level: stdlib-logging level for the 'asyncio' logger;
            defaults to the converted base level.
        aiohttp_level: stdlib-logging level for the 'aiohttp' logger;
            defaults to the converted base level.
    """
    # Normalize the base level, then derive per-logger levels from it.
    level = logbook.lookup_level(level)
    default_level = _convert_level(level)
    asyncio_level = (default_level if asyncio_level is None
                     else _convert_level(asyncio_level))
    aiohttp_level = (default_level if aiohttp_level is None
                     else _convert_level(aiohttp_level))
    # Enable logger group
    _logger_group.disabled = False
    # Ask asyncio for verbose diagnostics.
    os.environ['PYTHONASYNCIODEBUG'] = '1'
    # Redirect both stdlib loggers into logbook via the shared handler.
    for logger_name, logger_level in (
            ('asyncio', asyncio_level), ('aiohttp', aiohttp_level)):
        stdlib_logger = logging.getLogger(logger_name)
        stdlib_logger.setLevel(logger_level)
        stdlib_logger.addHandler(_logger_redirect_handler)
示例9: setup_sentry
# 需要导入模块: import logbook [as 别名]
# 或者: from logbook import WARNING [as 别名]
def setup_sentry(name, channel, dsn):
    """
    Setup sentry account using taskcluster secrets
    """
    # Identify the hosting environment from well-known env variables.
    task_id = os.environ.get("TASK_ID")
    on_taskcluster = task_id is not None
    if on_taskcluster:
        site = "taskcluster"
    else:
        site = "heroku" if "DYNO" in os.environ else "unknown"

    client = raven.Client(
        dsn=dsn,
        site=site,
        name=name,
        environment=channel,
        release=raven.fetch_package_version(f"code-coverage-{name}"),
    )

    if on_taskcluster:
        # Attach the task id so it shows up in the Additional Data section
        # on the Sentry dashboard.
        client.context.merge({"extra": {"task_id": task_id}})

    # Forward every WARNING-or-worse logbook record to Sentry.
    handler = raven.handlers.logbook.SentryHandler(
        client, level=logbook.WARNING, bubble=True
    )
    handler.push_application()
示例10: setup_sentry
# 需要导入模块: import logbook [as 别名]
# 或者: from logbook import WARNING [as 别名]
def setup_sentry(name, channel, dsn):
    """
    Setup sentry account using taskcluster secrets
    """
    # Work out which platform this process is running on.
    task_id = os.environ.get("TASK_ID")
    on_taskcluster = task_id is not None
    if on_taskcluster:
        site = "taskcluster"
    else:
        site = "heroku" if "DYNO" in os.environ else "unknown"

    client = raven.Client(
        dsn=dsn,
        site=site,
        name=name,
        environment=channel,
        release=raven.fetch_package_version(f"code-review-{name}"),
    )

    if on_taskcluster:
        # Surface the task id in the Additional Data section on the
        # Sentry dashboard.
        client.context.merge({"extra": {"task_id": task_id}})

    # Report every WARNING-or-worse logbook record to Sentry.
    handler = raven.handlers.logbook.SentryHandler(
        client, level=logbook.WARNING, bubble=True
    )
    handler.push_application()
示例11: __init__
# 需要导入模块: import logbook [as 别名]
# 或者: from logbook import WARNING [as 别名]
def __init__(self,
             env,
             policy,
             task,
             rollout_length,
             episode_summary_freq,
             env_render_freq,
             test,
             ep_summary,
             runner_fn_ref=BaseEnvRunnerFn,
             memory_config=None,
             log_level=WARNING,
             **kwargs):
    """
    Runner thread: stores configuration and sets up logging and a bounded
    rollout queue; the execution logic itself is supplied via `runner_fn_ref`.

    Args:
        env: environment instance
        policy: policy instance
        task: int, used to tag this runner and name its logger
        rollout_length: int
        episode_summary_freq: int
        env_render_freq: int
        test: Atari or BTGyn
        ep_summary: tf.summary
        runner_fn_ref: callable defining runner execution logic
        memory_config: replay memory configuration dictionary
        log_level: int, logbook.level
        **kwargs: accepted but unused here  # NOTE(review): silently dropped
    """
    threading.Thread.__init__(self)
    # Small bounded queue: the producer blocks once 5 items are pending.
    self.queue = queue.Queue(5)
    self.rollout_length = rollout_length
    self.env = env
    self.last_features = None
    self.policy = policy
    self.runner_fn_ref = runner_fn_ref
    # Daemon thread: will not keep the interpreter alive at shutdown.
    self.daemon = True
    # Session and summary writer are left unset here; presumably assigned
    # later, before the thread runs -- TODO confirm against caller.
    self.sess = None
    self.summary_writer = None
    self.episode_summary_freq = episode_summary_freq
    self.env_render_freq = env_render_freq
    self.task = task
    self.test = test
    self.ep_summary = ep_summary
    self.memory_config = memory_config
    self.log_level = log_level
    # Route log records to stdout for this process.
    StreamHandler(sys.stdout).push_application()
    self.log = Logger('ThreadRunner_{}'.format(self.task), level=self.log_level)
示例12: server_factory
# 需要导入模块: import logbook [as 别名]
# 或者: from logbook import WARNING [as 别名]
def server_factory(request, event_loop, server_permanent_keys):
    """
    Return a factory to create :class:`saltyrtc.Server` instances.
    """
    # Enable asyncio debug logging
    event_loop.set_debug(True)
    # Enable logging: saltyrtc at DEBUG, noisy third-party loggers at WARNING
    util.enable_logging(level=logbook.DEBUG, redirect_loggers={
        'asyncio': logbook.WARNING,
        'websockets': logbook.WARNING,
    })
    # Push handlers
    logging_handler = logbook.StderrHandler(bubble=True)
    logging_handler.push_application()
    # Track live servers so the logging handler is popped only after the
    # last one has been torn down.
    _server_instances = []
    def _server_factory(permanent_keys=None):
        # Default to the fixture-provided permanent keys.
        if permanent_keys is None:
            permanent_keys = server_permanent_keys
        # Setup server
        port = unused_tcp_port()
        coroutine = serve(
            util.create_ssl_context(
                pytest.saltyrtc.cert, keyfile=pytest.saltyrtc.key,
                dh_params_file=pytest.saltyrtc.dh_params),
            permanent_keys,
            host=pytest.saltyrtc.host,
            port=port,
            loop=event_loop,
            server_class=TestServer,
        )
        server_ = event_loop.run_until_complete(coroutine)
        # Inject timeout and address (little bit of a hack but meh...)
        server_.timeout = _get_timeout(request=request)
        server_.address = (pytest.saltyrtc.host, port)
        _server_instances.append(server_)
        def fin():
            # Finalizer: close the server and wait for a clean shutdown.
            server_.close()
            event_loop.run_until_complete(server_.wait_closed())
            _server_instances.remove(server_)
            if len(_server_instances) == 0:
                logging_handler.pop_application()
        request.addfinalizer(fin)
        return server_
    return _server_factory
示例13: test_run_examples
# 需要导入模块: import logbook [as 别名]
# 或者: from logbook import WARNING [as 别名]
def test_run_examples(self):
    """
    Smoke-test: for each exchange, ingest its data bundle, run every
    example algorithm end-to-end, and assert that exactly one expected
    warning (the alpha-software notice) and no errors/criticals are logged.
    """
    # folder = join('..', '..', '..', 'catalyst', 'examples')
    HERE = os.path.dirname(os.path.abspath(__file__))
    folder = os.path.join(HERE, '..', '..', '..', 'catalyst', 'examples')
    files = [f for f in os.listdir(folder)
             if os.path.isfile(os.path.join(folder, f))]
    # Build the list of example modules to import and run.
    algo_list = []
    for filename in files:
        name = os.path.basename(filename)
        # filter_algos (module-level) optionally restricts the run to a subset.
        if filter_algos and name not in filter_algos:
            continue
        module_name = 'catalyst.examples.{}'.format(
            name.replace('.py', '')
        )
        algo_list.append(module_name)
    exchanges = ['poloniex', 'bittrex', 'binance']
    asset_name = 'btc_usdt'
    quote_currency = 'usdt'
    capital_base = 10000
    data_freq = 'daily'
    start_date = pd.to_datetime('2017-10-01', utc=True)
    end_date = pd.to_datetime('2017-12-01', utc=True)
    for exchange_name in exchanges:
        ingest_exchange_bundles(exchange_name, data_freq, asset_name)
        for module_name in algo_list:
            algo = importlib.import_module(module_name)
            # namespace = module_name.replace('.', '_')
            # Capture all log records emitted during the run.
            log_catcher = TestHandler()
            with log_catcher:
                run_algorithm(
                    capital_base=capital_base,
                    data_frequency=data_freq,
                    initialize=algo.initialize,
                    handle_data=algo.handle_data,
                    analyze=TestSuiteAlgo.analyze,
                    exchange_name=exchange_name,
                    algo_namespace='test_{}'.format(exchange_name),
                    quote_currency=quote_currency,
                    start=start_date,
                    end=end_date,
                    # output=out
                )
                # Exactly one WARNING is expected: the alpha notice.
                warnings = [record for record in log_catcher.records if
                            record.level == WARNING]
                assert(len(warnings) == 1)
                assert (warnings[0].message == ALPHA_WARNING_MESSAGE)
                assert (not log_catcher.has_errors)
                assert (not log_catcher.has_criticals)
        # Remove the ingested bundle before moving to the next exchange.
        clean_exchange_bundles(exchange_name, data_freq)
示例14: _test_eod_order_cancel_minute
# 需要导入模块: import logbook [as 别名]
# 或者: from logbook import WARNING [as 别名]
def _test_eod_order_cancel_minute(self, direction, minute_emission):
    """
    Test that EOD order cancel works in minute mode for both shorts and
    longs, and both daily emission and minute emission

    Args:
        direction: +1 for a long order, -1 for a short (sign fed to
            np.copysign).
        minute_emission: emission-mode flag forwarded to prep_algo.
    """
    # order 1000 shares of asset1. the volume is only 1 share per bar,
    # so the order should be cancelled at the end of the day.
    algo = self.prep_algo(
        "set_cancel_policy(cancel_policy.EODCancel())",
        amount=np.copysign(1000, direction),
        minute_emission=minute_emission
    )
    log_catcher = TestHandler()
    with log_catcher:
        results = algo.run(self.data_portal)
        # One open position each day, sized by the partial fill.
        # NOTE(review): 389 is presumably 390 minute bars minus the bar on
        # which the order was placed -- TODO confirm.
        for daily_positions in results.positions:
            self.assertEqual(1, len(daily_positions))
            self.assertEqual(
                np.copysign(389, direction),
                daily_positions[0]["amount"],
            )
        self.assertEqual(1, results.positions[0][0]["sid"])
        # should be an order on day1, but no more orders afterwards
        np.testing.assert_array_equal([1, 0, 0],
                                      list(map(len, results.orders)))
        # should be 389 txns on day 1, but no more afterwards
        np.testing.assert_array_equal([389, 0, 0],
                                      list(map(len, results.transactions)))
        the_order = results.orders[0][0]
        self.assertEqual(ORDER_STATUS.CANCELLED, the_order["status"])
        self.assertEqual(np.copysign(389, direction), the_order["filled"])
        # Exactly one WARNING is expected: the partial-fill/cancel notice,
        # with direction-specific wording.
        warnings = [record for record in log_catcher.records if
                    record.level == WARNING]
        self.assertEqual(1, len(warnings))
        if direction == 1:
            self.assertEqual(
                "Your order for 1000 shares of ASSET1 has been partially "
                "filled. 389 shares were successfully purchased. "
                "611 shares were not filled by the end of day and "
                "were canceled.",
                str(warnings[0].message)
            )
        elif direction == -1:
            self.assertEqual(
                "Your order for -1000 shares of ASSET1 has been partially "
                "filled. 389 shares were successfully sold. "
                "611 shares were not filled by the end of day and "
                "were canceled.",
                str(warnings[0].message)
            )
示例15: _test_order_in_quiet_period
# 需要导入模块: import logbook [as 别名]
# 或者: from logbook import WARNING [as 别名]
def _test_order_in_quiet_period(self, name, sid):
    """
    Run an algo that issues one order through each of the six order APIs
    every minute bar against a de-listed asset, and assert one de-listing
    warning per call per bar. (`name` is accepted but not used here.)
    """
    asset = self.asset_finder.retrieve_asset(sid)
    # Algo source: six order-API calls per handle_data invocation.
    algo_code = dedent("""
    from catalyst.api import (
        sid,
        order,
        order_value,
        order_percent,
        order_target,
        order_target_percent,
        order_target_value
    )
    def initialize(context):
        pass
    def handle_data(context, data):
        order(sid({sid}), 1)
        order_value(sid({sid}), 100)
        order_percent(sid({sid}), 0.5)
        order_target(sid({sid}), 50)
        order_target_percent(sid({sid}), 0.5)
        order_target_value(sid({sid}), 50)
    """).format(sid=sid)
    # run algo from 1/6 to 1/7
    algo = TradingAlgorithm(
        script=algo_code,
        env=self.env,
        sim_params=SimulationParameters(
            start_session=pd.Timestamp("2016-01-06", tz='UTC'),
            end_session=pd.Timestamp("2016-01-07", tz='UTC'),
            trading_calendar=self.trading_calendar,
            data_frequency="minute"
        )
    )
    with make_test_handler(self) as log_catcher:
        algo.run(self.data_portal)
        warnings = [r for r in log_catcher.records
                    if r.level == logbook.WARNING]
        # one warning per order on the second day
        # 6 order calls * 390 minute bars = 2340 expected warnings
        self.assertEqual(6 * 390, len(warnings))
        # Every warning carries the same de-listing message for this asset.
        for w in warnings:
            expected_message = (
                'Cannot place order for ASSET{sid}, as it has de-listed. '
                'Any existing positions for this asset will be liquidated '
                'on {date}.'.format(sid=sid, date=asset.auto_close_date)
            )
            self.assertEqual(expected_message, w.message)