本文整理汇总了Python中tests.quiet.no_handlers_for_logger函数的典型用法代码示例。如果您正苦于以下问题:Python no_handlers_for_logger函数的具体用法?Python no_handlers_for_logger怎么用?Python no_handlers_for_logger使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了no_handlers_for_logger函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_precedence_deprecated
def test_precedence_deprecated(self):
    """Deprecated config locations are honored in precedence order, each
    emitting a deprecation warning; $MRJOB_CONF beats all of them."""
    os.environ['HOME'] = '/home/foo'
    os.environ['PYTHONPATH'] = '/py1:/py2'
    self._existing_paths = set()

    # nothing exists yet -> no conf found
    assert_equal(find_mrjob_conf(), None)

    # /etc/mrjob.conf is a supported location (no warning expected here)
    self._existing_paths.add('/etc/mrjob.conf')
    assert_equal(find_mrjob_conf(), '/etc/mrjob.conf')

    # each deprecated location takes precedence once it exists, and
    # finding it logs a deprecation warning
    for deprecated_path in ('/py2/mrjob.conf',
                            '/py1/mrjob.conf',
                            '/home/foo/.mrjob'):
        self._existing_paths.add(deprecated_path)
        with no_handlers_for_logger():
            buf = self._log_to_buffer()
            assert_equal(find_mrjob_conf(), deprecated_path)
            assert_in('This config path is deprecated', buf.getvalue())

    # $MRJOB_CONF wins over everything else
    mrjob_conf_path = os.path.join(self.tmp_dir, 'mrjob.conf')
    open(mrjob_conf_path, 'w').close()
    os.environ['MRJOB_CONF'] = mrjob_conf_path
    self._existing_paths.add(mrjob_conf_path)
    assert_equal(find_mrjob_conf(), mrjob_conf_path)
示例2: test_precedence_deprecated
def test_precedence_deprecated(self):
    """Deprecated config locations are honored in precedence order with a
    deprecation warning each; $MRJOB_CONF overrides them all."""
    os.environ["HOME"] = "/home/foo"
    os.environ["PYTHONPATH"] = "/py1:/py2"
    self._existing_paths = set()

    # no conf anywhere yet
    self.assertEqual(find_mrjob_conf(), None)

    # /etc/mrjob.conf is found without a warning
    self._existing_paths.add("/etc/mrjob.conf")
    self.assertEqual(find_mrjob_conf(), "/etc/mrjob.conf")

    # deprecated locations, in increasing precedence; each one warns
    for legacy_path in ["/py2/mrjob.conf",
                        "/py1/mrjob.conf",
                        "/home/foo/.mrjob"]:
        self._existing_paths.add(legacy_path)
        with no_handlers_for_logger():
            buf = log_to_buffer("mrjob.conf")
            self.assertEqual(find_mrjob_conf(), legacy_path)
            self.assertIn("This config path is deprecated", buf.getvalue())

    # $MRJOB_CONF takes precedence over all of the above
    mrjob_conf_path = os.path.join(self.tmp_dir, "mrjob.conf")
    open(mrjob_conf_path, "w").close()
    os.environ["MRJOB_CONF"] = mrjob_conf_path
    self._existing_paths.add(mrjob_conf_path)
    self.assertEqual(find_mrjob_conf(), mrjob_conf_path)
示例3: test_python_dash_v_as_python_bin
def test_python_dash_v_as_python_bin(self):
    """Using `python -v` as --python-bin still runs the job; the extra
    import chatter lands in the mapper's stderr without affecting output."""
    verbose_python = cmd_line([sys.executable or 'python', '-v'])
    mr_job = MRTwoStepJob(
        ['--python-bin', verbose_python, '--no-conf', '-r', 'local'])
    mr_job.sandbox(stdin=[b'bar\n'])

    with no_handlers_for_logger():
        with mr_job.make_runner() as runner:
            runner.run()

            # `python -v` logs every import to stderr; mrjob's own import
            # should appear there
            stderr_path = runner._task_stderr_path('mapper', 0, 0)
            with open(stderr_path) as lines:
                self.assertTrue(any(
                    'import mrjob' in line or      # Python 2
                    "import 'mrjob'" in line       # Python 3
                    for line in lines))
            with open(stderr_path) as lines:
                self.assertTrue(any('#' in line for line in lines))

            # despite the stderr noise, results should be unchanged
            self.assertEqual(
                sorted(to_lines(runner.cat_output())),
                sorted([b'1\tnull\n', b'1\t"bar"\n']))
示例4: test_non_log_lines
def test_non_log_lines(self):
    """Leading non-log lines are skipped (one warning each), and a bare
    trailing line is folded into the preceding log message."""
    lines = StringIO('foo\n'
                     'bar\n'
                     '15/12/11 13:26:08 ERROR streaming.StreamJob:'
                     ' Error Launching job :'
                     ' Output directory already exists\n'
                     'Streaming Command Failed!')

    with no_handlers_for_logger('mrjob.logs.parse'):
        stderr = StringIO()
        log_to_stream('mrjob.logs.parse', stderr)

        # leading non-log lines ('foo', 'bar') are ignored
        expected = [
            dict(
                timestamp='15/12/11 13:26:08',
                level='ERROR',
                logger='streaming.StreamJob',
                thread=None,
                # no way to know that Streaming Command Failed! wasn't
                # part of a multi-line message
                message=('Error Launching job :'
                         ' Output directory already exists\n'
                         'Streaming Command Failed!'),
            ),
        ]
        self.assertEqual(list(_parse_hadoop_log_lines(lines)), expected)

        # one warning was logged for each leading non-log line
        self.assertEqual(len(stderr.getvalue().splitlines()), 2)
示例5: test_kill_persistent_cluster
def test_kill_persistent_cluster(self):
    """_cleanup_cluster() should delete the cluster through the API."""
    with no_handlers_for_logger("mrjob.dataproc"):
        runner = self._quick_runner()
        delete_patch = patch.object(
            mrjob.dataproc.DataprocJobRunner, "_api_cluster_delete")
        with delete_patch as mock_delete:
            runner._opts["cluster_id"] = "j-MOCKCLUSTER0"
            runner._cleanup_cluster()
            self.assertTrue(mock_delete.called)
示例6: test_failed_job
def test_failed_job(self):
    # A cluster that advances into the ERROR state should make the run
    # raise StepFailedException, and the cluster should then be deleted.
    mr_job = MRTwoStepJob(['-r', 'dataproc', '-v'])
    mr_job.sandbox()

    with no_handlers_for_logger('mrjob.dataproc'):
        # capture mrjob.dataproc's log output so we can check for the
        # state-transition message below
        stderr = StringIO()
        log_to_stream('mrjob.dataproc', stderr)

        # script the mock API: the job walks through these states
        self._dataproc_client.job_get_advances_states = (
            collections.deque(['SETUP_DONE', 'RUNNING', 'ERROR']))

        with mr_job.make_runner() as runner:
            self.assertIsInstance(runner, DataprocJobRunner)

            # the ERROR state should surface as StepFailedException
            self.assertRaises(StepFailedException, runner.run)

            # the state transition was logged
            self.assertIn(' => ERROR\n', stderr.getvalue())

            cluster_id = runner.get_cluster_id()

    # job should get terminated
    # (cleanup on runner exit should have put the cluster in DELETING)
    cluster = (
        self._dataproc_client._cache_clusters[_TEST_PROJECT][cluster_id])
    cluster_state = self._dataproc_client.get_state(cluster)
    self.assertEqual(cluster_state, 'DELETING')
示例7: test_can_turn_off_bootstrap_mrjob
def test_can_turn_off_bootstrap_mrjob(self):
    # With bootstrap_mrjob off, the task must import mrjob from wherever
    # it's installed on the system, NOT from a copy bootstrapped into the
    # runner's local tmp dir.
    with mrjob_conf_patcher(
            {'runners': {'local': {'bootstrap_mrjob': False}}}):
        mr_job = MRJobWhereAreYou(['-r', 'local'])
        mr_job.sandbox()

        with mr_job.make_runner() as runner:
            # sanity check
            self.assertEqual(runner._opts['bootstrap_mrjob'], False)
            local_tmp_dir = os.path.realpath(runner._get_local_tmp_dir())

            try:
                with no_handlers_for_logger():
                    runner.run()
            except StepFailedException:
                # this is what happens when mrjob isn't installed elsewhere
                return

            # however, if mrjob is installed, we need to verify that
            # we're using the installed version and not a bootstrapped copy
            output = list(mr_job.parse_output(runner.cat_output()))
            self.assertEqual(len(output), 1)

            # script should not load mrjob from local_tmp_dir
            _, script_mrjob_dir = output[0]
            self.assertFalse(script_mrjob_dir.startswith(local_tmp_dir))
示例8: test_non_log_lines
def test_non_log_lines(self):
    """Lines before the first parseable log line are dropped with one
    warning apiece; a trailing bare line joins the previous message."""
    lines = StringIO(
        "foo\n"
        "bar\n"
        "15/12/11 13:26:08 ERROR streaming.StreamJob:"
        " Error Launching job :"
        " Output directory already exists\n"
        "Streaming Command Failed!"
    )

    with no_handlers_for_logger("mrjob.logs.parse"):
        stderr = StringIO()
        log_to_stream("mrjob.logs.parse", stderr)

        parsed = list(_parse_hadoop_log_lines(lines))

        # ignore leading non-log lines; no way to know that
        # Streaming Command Failed! wasn't part of a multi-line message
        self.assertEqual(
            parsed,
            [
                dict(
                    timestamp="15/12/11 13:26:08",
                    level="ERROR",
                    logger="streaming.StreamJob",
                    thread=None,
                    message=(
                        "Error Launching job :"
                        " Output directory already exists\n"
                        "Streaming Command Failed!"
                    ),
                )
            ],
        )

        # should be one warning for each leading non-log line
        warning_lines = stderr.getvalue().splitlines()
        self.assertEqual(len(warning_lines), 2)
示例9: test_hadoop_runner
def test_hadoop_runner(self):
    """-r hadoop should yield a HadoopJobRunner from make_runner()."""
    # you can't instantiate a HadoopJobRunner without Hadoop installed,
    # so point HADOOP_HOME somewhere and supply a fake streaming jar
    args = ["--no-conf", "-r", "hadoop", "",
            "--hadoop-streaming-jar", "HUNNY"]
    launcher = MRJobLauncher(args=args)
    with no_handlers_for_logger("mrjob.runner"):
        with patch.dict(os.environ, {"HADOOP_HOME": "100-Acre Wood"}):
            with launcher.make_runner() as runner:
                self.assertIsInstance(runner, HadoopJobRunner)
示例10: test_deprecated_mapper_final_positional_arg
def test_deprecated_mapper_final_positional_arg(self):
    """mapper_final may still be given positionally to MRJob.mr(), but
    doing so logs a deprecation warning; giving it twice is a TypeError."""
    def mapper(k, v):
        pass

    def reducer(k, v):
        pass

    def mapper_final():
        pass

    stderr = StringIO()
    with no_handlers_for_logger():
        log_to_stream('mrjob.job', stderr)
        step = MRJob.mr(mapper, reducer, mapper_final)

    # should be allowed to specify mapper_final as a positional arg,
    # producing the same step as the keyword form...
    self.assertEqual(
        step,
        MRJob.mr(
            mapper=mapper, reducer=reducer, mapper_final=mapper_final))
    # ...but we log a warning
    self.assertIn('mapper_final should be specified', stderr.getvalue())

    # can't specify mapper_final as a positional and keyword arg
    self.assertRaises(
        TypeError,
        MRJob.mr, mapper, reducer, mapper_final, mapper_final=mapper_final)
示例11: assert_hadoop_version
def assert_hadoop_version(self, JobClass, version_string):
    """Assert that JobClass's jobconf() yields the given hadoop_version
    string, along with a warning that the value should be a string."""
    job = JobClass()
    log_output = StringIO()
    with no_handlers_for_logger("mrjob.job"):
        log_to_stream("mrjob.job", log_output)
        self.assertEqual(job.jobconf()["hadoop_version"], version_string)
        self.assertIn("should be a string", log_output.getvalue())
示例12: _test_round_trip
def _test_round_trip(self, conf):
    """Dump *conf* to a temp mrjob.conf and assert it loads back equal."""
    conf_path = os.path.join(self.tmp_dir, 'mrjob.conf')
    with open(conf_path, 'w') as f:
        dump_mrjob_conf(conf, f)
    with no_handlers_for_logger('mrjob.conf'):
        self.assertEqual(load_mrjob_conf(conf_path=conf_path), conf)
示例13: test_large_amounts_of_stderr
def test_large_amounts_of_stderr(self):
    # MRVerboseJob emits lots of counters/statuses to stderr and then
    # blows up; all the important bits should still reach our stderr.
    mr_job = MRVerboseJob(['--no-conf', '-r', 'local', '-v'])
    mr_job.sandbox()

    try:
        with no_handlers_for_logger():
            mr_job.run_job()
    except TimeoutException:
        # a hang is a real failure; don't swallow it
        raise
    except SystemExit:
        # we expect the job to throw a StepFailedException,
        # which causes run_job to call sys.exit()

        # look for expected output from MRVerboseJob
        stderr = mr_job.stderr.getvalue()
        self.assertIn(
            b"Counters: 1\n\tFoo\n\t\tBar=10000", stderr)
        self.assertIn(b'Status: 0\n', stderr)
        self.assertIn(b'Status: 99\n', stderr)
        self.assertNotIn(b'Status: 100\n', stderr)
        self.assertIn(b'STDERR: Qux\n', stderr)
        # exception should appear in exception message
        self.assertIn(b'BOOM', stderr)
    else:
        # run_job was supposed to exit; reaching here means it didn't
        raise AssertionError()
示例14: test_round_trip
def test_round_trip(self):
    """A conf dict dumped with dump_mrjob_conf() loads back unchanged."""
    conf = {"runners": {"foo": {"qux": "quux"}}}
    conf_path = os.path.join(self.tmp_dir, "mrjob.conf")

    # close (and flush) the file before reading it back; the original
    # passed an unclosed handle to dump_mrjob_conf(), leaking it and
    # risking an unflushed file when load_mrjob_conf() reads it
    with open(conf_path, "w") as f:
        dump_mrjob_conf(conf, f)

    with no_handlers_for_logger("mrjob.conf"):
        self.assertEqual(conf, load_mrjob_conf(conf_path=conf_path))
示例15: test_messy_error
def test_messy_error(self):
    """An unparseable COUNTERS field yields (None, None) and a warning."""
    counter_string = (
        'Job JOBID="_001" FAILED_REDUCES="0"'
        ' COUNTERS="THIS IS NOT ACTUALLY A COUNTER"')
    with no_handlers_for_logger(''):
        stderr = StringIO()
        log_to_stream('mrjob.parse', stderr, level=logging.WARN)
        result = parse_hadoop_counters_from_line(counter_string)
        assert_equal(result, (None, None))
        assert_in('Cannot parse Hadoop counter line', stderr.getvalue())