本文整理汇总了Python中jobbrowser.conf.SHARE_JOBS.set_for_testing方法的典型用法代码示例。如果您正苦于以下问题:Python SHARE_JOBS.set_for_testing方法的具体用法?Python SHARE_JOBS.set_for_testing怎么用?Python SHARE_JOBS.set_for_testing使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类jobbrowser.conf.SHARE_JOBS
的用法示例。
在下文中一共展示了SHARE_JOBS.set_for_testing方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_job_permissions
# 需要导入模块: from jobbrowser.conf import SHARE_JOBS [as 别名]
# 或者: from jobbrowser.conf.SHARE_JOBS import set_for_testing [as 别名]
def test_job_permissions(self):
    """
    Verify job visibility under both SHARE_JOBS settings.

    The job owner must always see their own job regardless of the sharing
    flag; a different user sees it only while job sharing is enabled.
    """
    job_id = TestJobBrowserWithHadoop.hadoop_job_id_short
    owner_client = TestJobBrowserWithHadoop.client

    def job_listed_for(client):
        # Fetch the job listing (all users) and report whether our job appears.
        response = client.get('/jobbrowser/jobs/?format=json&user=')
        return job_id in response.content

    # Login as ourself: the owner's job is listed with sharing on or off.
    for share in (True, False):
        finish = SHARE_JOBS.set_for_testing(share)
        try:
            assert_true(job_listed_for(owner_client))
        finally:
            finish()

    # Login as someone else: visible only while sharing is enabled.
    client_not_me = make_logged_in_client(username='not_me', is_superuser=False, groupname='test')
    grant_access("not_me", "test", "jobbrowser")

    finish = SHARE_JOBS.set_for_testing(True)
    try:
        assert_true(job_listed_for(client_not_me))
    finally:
        finish()

    finish = SHARE_JOBS.set_for_testing(False)
    try:
        assert_false(job_listed_for(client_not_me))
    finally:
        finish()
示例2: setUp
# 需要导入模块: from jobbrowser.conf import SHARE_JOBS [as 别名]
# 或者: from jobbrowser.conf.SHARE_JOBS import set_for_testing [as 别名]
def setUp(self):
    """
    Monkey-patch the YARN API accessors with mocks and create two
    non-superuser test clients with jobbrowser access.

    The original accessor function is stashed on its module the first
    time through so that repeated setUp calls (or a later tearDown
    restore) do not clobber the real accessor with an already-patched
    version.
    """
    # Beware: Monkey patching.
    # Bug fix: each guard must test for the exact attribute it assigns,
    # on the module it assigns it to. The original code checked
    # 'old_get_resource_manager_api' (never set, so the guard always
    # re-saved — eventually stashing a mock), and looked for
    # 'old_get_mapreduce_api' on resource_manager_api instead of
    # mapreduce_api.
    if not hasattr(resource_manager_api, 'old_get_resource_manager'):
        resource_manager_api.old_get_resource_manager = resource_manager_api.get_resource_manager
    if not hasattr(mapreduce_api, 'old_get_mapreduce_api'):
        mapreduce_api.old_get_mapreduce_api = mapreduce_api.get_mapreduce_api
    if not hasattr(history_server_api, 'old_get_history_server_api'):
        history_server_api.old_get_history_server_api = history_server_api.get_history_server_api

    # Two regular (non-superuser) users, both granted jobbrowser access.
    self.c = make_logged_in_client(is_superuser=False)
    grant_access("test", "test", "jobbrowser")
    self.user = User.objects.get(username='test')
    self.c2 = make_logged_in_client(is_superuser=False, username="test2")
    grant_access("test2", "test2", "jobbrowser")
    self.user2 = User.objects.get(username='test2')

    # Install the mock YARN APIs.
    resource_manager_api.get_resource_manager = lambda user: MockResourceManagerApi(user)
    mapreduce_api.get_mapreduce_api = lambda: MockMapreduceApi()
    history_server_api.get_history_server_api = lambda: HistoryServerApi()

    # Config restorers (invoked later, presumably by tearDown): submit to
    # the YARN cluster, job sharing disabled.
    self.finish = [
        YARN_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True),
        SHARE_JOBS.set_for_testing(False)
    ]
    assert_true(cluster.is_yarn())
示例3: test_job
# 需要导入模块: from jobbrowser.conf import SHARE_JOBS [as 别名]
# 或者: from jobbrowser.conf.SHARE_JOBS import set_for_testing [as 别名]
def test_job(self):
"""
Test new job views.
The status of the jobs should be the same as the status reported back by oozie.
In this case, all jobs should succeed.

NOTE(review): this example is truncated by the source page — the trailing
assertions ("Select by text" onwards) are not visible here.
"""
# Run the sleep example, since it doesn't require user home directory
design_id = self.design.id
response = self.client.post(reverse('oozie:submit_workflow',
args=[design_id]),
data={u'form-MAX_NUM_FORMS': [u''],
u'form-INITIAL_FORMS': [u'1'],
u'form-0-name': [u'REDUCER_SLEEP_TIME'],
u'form-0-value': [u'1'],
u'form-TOTAL_FORMS': [u'1']},
follow=True)
oozie_jobid = response.context['oozie_workflow'].id
# Block until Oozie reports the workflow finished (up to 120s, polling every 1s).
OozieServerProvider.wait_until_completion(oozie_jobid, timeout=120, step=1)
hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)
hadoop_job_id_short = views.get_shorter_id(hadoop_job_id)
# All jobs page and fetch job ID
# Taking advantage of the fact new jobs are at the top of the list!
response = self.client.get('/jobbrowser/jobs/')
assert_true(hadoop_job_id_short in response.content, response.content)
# Make sure job succeeded
response = self.client.get('/jobbrowser/jobs/?state=completed')
assert_true(hadoop_job_id_short in response.content)
response = self.client.get('/jobbrowser/jobs/?state=failed')
assert_false(hadoop_job_id_short in response.content)
response = self.client.get('/jobbrowser/jobs/?state=running')
assert_false(hadoop_job_id_short in response.content)
response = self.client.get('/jobbrowser/jobs/?state=killed')
assert_false(hadoop_job_id_short in response.content)
# Check sharing permissions
# Login as ourself
# The owner always sees their own job, with sharing on or off.
finish = SHARE_JOBS.set_for_testing(True)
try:
response = self.client.get('/jobbrowser/jobs/?user=')
assert_true(hadoop_job_id_short in response.content)
finally:
finish()
finish = SHARE_JOBS.set_for_testing(False)
try:
response = self.client.get('/jobbrowser/jobs/?user=')
assert_true(hadoop_job_id_short in response.content)
finally:
finish()
# Login as someone else
# A different user sees the job only while SHARE_JOBS is enabled.
client_not_me = make_logged_in_client(username='not_me', is_superuser=False, groupname='test')
grant_access("not_me", "test", "jobbrowser")
finish = SHARE_JOBS.set_for_testing(True)
try:
response = client_not_me.get('/jobbrowser/jobs/?user=')
assert_true(hadoop_job_id_short in response.content)
finally:
finish()
finish = SHARE_JOBS.set_for_testing(False)
try:
response = client_not_me.get('/jobbrowser/jobs/?user=')
assert_false(hadoop_job_id_short in response.content)
finally:
finish()
# Single job page
response = self.client.get('/jobbrowser/jobs/%s' % hadoop_job_id)
# Check some counters for single job.
counters = response.context['job'].counters
counters_file_bytes_written = counters['org.apache.hadoop.mapreduce.FileSystemCounter']['counters']['FILE_BYTES_WRITTEN']
assert_true(counters_file_bytes_written['map'] > 0)
assert_true(counters_file_bytes_written['reduce'] > 0)
# We can't just check the complete contents of the python map because the
# SLOTS_MILLIS_* entries have a variable number of milliseconds from
# run-to-run.
# NOTE(review): the 2L/1L/0L literals below are Python 2 `long` syntax;
# this file is Python 2 only.
assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['TOTAL_LAUNCHED_MAPS']['total'], 2L)
assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['TOTAL_LAUNCHED_REDUCES']['total'], 1L)
assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['FALLOW_SLOTS_MILLIS_MAPS']['total'], 0L)
assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['FALLOW_SLOTS_MILLIS_REDUCES']['total'], 0L)
assert_true(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['SLOTS_MILLIS_MAPS']['total'] > 0)
assert_true(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['SLOTS_MILLIS_REDUCES']['total'] > 0)
# There should be 4 tasks for this job: cleanup, setup, map, reduce
# NOTE(review): assert_true(len(...), 4) only checks truthiness — the 4 is
# the failure-message argument, not an expected value; assert_equal was
# probably intended in the three checks below.
response = self.client.get('/jobbrowser/jobs/%s/tasks' % (hadoop_job_id,))
assert_true(len(response.context['page'].object_list), 4)
# Select by tasktype
response = self.client.get('/jobbrowser/jobs/%s/tasks?tasktype=reduce' % (hadoop_job_id,))
assert_true(len(response.context['page'].object_list), 1)
# Select by taskstate
response = self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=succeeded' % (hadoop_job_id,))
assert_true(len(response.context['page'].object_list), 4)
# Select by text
# ... (remainder of this example omitted by the source page) ...
示例4: test_job
# 需要导入模块: from jobbrowser.conf import SHARE_JOBS [as 别名]
# 或者: from jobbrowser.conf.SHARE_JOBS import set_for_testing [as 别名]
def test_job(self):
"""
Test new job views.
The status of the jobs should be the same as the status reported back by oozie.
In this case, all jobs should succeed.

NOTE(review): this example is truncated by the source page — the trailing
assertions ("Select by taskstate" onwards) are not visible here.
"""
# Clone design
assert_equal(0, OozieDesign.objects.filter(owner__username=self.username).count())
self.client.post('/jobsub/clone_design/%d' % self.sleep_design_id)
assert_equal(1, OozieDesign.objects.filter(owner__username=self.username).count())
# Run the sleep example, since it doesn't require user home directory
design_id = OozieDesign.objects.get(owner__username=self.username).id
response = self.client.post("/jobsub/submit_design/%d" % (design_id,),
dict(map_sleep_time=1,
num_maps=1,
num_reduces=1,
reduce_sleep_time=1),
follow=True)
oozie_jobid = response.context['jobid']
# Block until Oozie reports the workflow finished (up to 120s, polling
# every 1s). NOTE(review): the returned `job` is never used afterwards.
job = OozieServerProvider.wait_until_completion(oozie_jobid, timeout=120, step=1)
hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)
hadoop_job_id_short = views.get_shorter_id(hadoop_job_id)
# All jobs page and fetch job ID
# Taking advantage of the fact new jobs are at the top of the list!
response = self.client.get('/jobbrowser/jobs/')
assert_true(hadoop_job_id_short in response.content)
# Make sure job succeeded
response = self.client.get('/jobbrowser/jobs/?state=completed')
assert_true(hadoop_job_id_short in response.content)
response = self.client.get('/jobbrowser/jobs/?state=failed')
assert_false(hadoop_job_id_short in response.content)
response = self.client.get('/jobbrowser/jobs/?state=running')
assert_false(hadoop_job_id_short in response.content)
response = self.client.get('/jobbrowser/jobs/?state=killed')
assert_false(hadoop_job_id_short in response.content)
# Check sharing permissions
# Login as ourself
# The owner always sees their own job, with sharing on or off.
finish = SHARE_JOBS.set_for_testing(True)
try:
response = self.client.get('/jobbrowser/jobs/?user=')
assert_true(hadoop_job_id_short in response.content)
finally:
finish()
finish = SHARE_JOBS.set_for_testing(False)
try:
response = self.client.get('/jobbrowser/jobs/?user=')
assert_true(hadoop_job_id_short in response.content)
finally:
finish()
# Login as someone else
# A different user sees the job only while SHARE_JOBS is enabled.
client_not_me = make_logged_in_client(username='not_me', is_superuser=False, groupname='test')
grant_access("not_me", "test", "jobbrowser")
finish = SHARE_JOBS.set_for_testing(True)
try:
response = client_not_me.get('/jobbrowser/jobs/?user=')
assert_true(hadoop_job_id_short in response.content)
finally:
finish()
finish = SHARE_JOBS.set_for_testing(False)
try:
response = client_not_me.get('/jobbrowser/jobs/?user=')
assert_false(hadoop_job_id_short in response.content)
finally:
finish()
# Single job page
response = self.client.get('/jobbrowser/jobs/%s' % hadoop_job_id)
# Check some counters for single job.
counters = response.context['job'].counters
counters_file_bytes_written = counters['org.apache.hadoop.mapreduce.FileSystemCounter']['counters']['FILE_BYTES_WRITTEN']
assert_true(counters_file_bytes_written['map'] > 0)
assert_true(counters_file_bytes_written['reduce'] > 0)
# We can't just check the complete contents of the python map because the
# SLOTS_MILLIS_* entries have a variable number of milliseconds from
# run-to-run.
assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['TOTAL_LAUNCHED_MAPS']['total'], 1)
assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['TOTAL_LAUNCHED_REDUCES']['total'], 1)
assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['FALLOW_SLOTS_MILLIS_MAPS']['total'], 0)
assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['FALLOW_SLOTS_MILLIS_REDUCES']['total'], 0)
assert_true(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['SLOTS_MILLIS_MAPS']['total'] > 0)
assert_true(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['SLOTS_MILLIS_REDUCES']['total'] > 0)
# There should be 4 tasks for this job: cleanup, setup, map, reduce
# NOTE(review): assert_true(len(...), 4) only checks truthiness — the 4 is
# the failure-message argument, not an expected value; assert_equal was
# probably intended in the two checks below.
response = self.client.get('/jobbrowser/jobs/%s/tasks' % (hadoop_job_id,))
assert_true(len(response.context['page'].object_list), 4)
# Select by tasktype
response = self.client.get('/jobbrowser/jobs/%s/tasks?tasktype=reduce' % (hadoop_job_id,))
assert_true(len(response.context['page'].object_list), 1)
# Select by taskstate
# ... (remainder of this example omitted by the source page) ...