本文整理汇总了Python中jobbrowser.conf.SHARE_JOBS类的典型用法代码示例。如果您正苦于以下问题:Python SHARE_JOBS类的具体用法?Python SHARE_JOBS怎么用?Python SHARE_JOBS使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了SHARE_JOBS类的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_job_permissions
def test_job_permissions(self):
    """Job visibility: the owner always sees the job; another user sees it
    only while the SHARE_JOBS flag is enabled."""
    def fetch(client, share):
        # Flip SHARE_JOBS for the duration of a single request, then restore.
        restore = SHARE_JOBS.set_for_testing(share)
        try:
            return client.get('/jobbrowser/jobs/?format=json&user=')
        finally:
            restore()

    job_id = TestJobBrowserWithHadoop.hadoop_job_id_short

    # Login as ourself: the job is visible regardless of the sharing setting.
    response = fetch(TestJobBrowserWithHadoop.client, True)
    assert_true(job_id in response.content)
    response = fetch(TestJobBrowserWithHadoop.client, False)
    assert_true(job_id in response.content)

    # Login as someone else: visible only while sharing is enabled.
    client_not_me = make_logged_in_client(username='not_me', is_superuser=False, groupname='test')
    grant_access("not_me", "test", "jobbrowser")
    response = fetch(client_not_me, True)
    assert_true(job_id in response.content)
    response = fetch(client_not_me, False)
    assert_false(job_id in response.content)
示例2: filter_jobs
def filter_jobs(self, user, jobs, **kwargs):
    """Return the subset of *jobs* that *user* is allowed to see.

    Everything is visible when job sharing is on or the user is a
    superuser; otherwise only the user's own jobs pass.
    """
    # Permission enforcement applies only when sharing is off and the
    # user is not a superuser.
    enforce = not SHARE_JOBS.get() and not user.is_superuser

    def may_view(job):
        # The redundant-looking superuser test is kept from the original
        # predicate so the filter behaves identically.
        return not enforce or user.is_superuser or job.user == user.username

    return filter(may_view, jobs)
示例3: setUp
def setUp(self):
    # Beware: Monkey patching. Save the real API factories exactly once so
    # they can be restored later; the hasattr guards make repeated setUp
    # calls safe. The original code checked 'old_get_resource_manager_api'
    # while storing 'old_get_resource_manager' (guard never matched, so a
    # second setUp would overwrite the saved original with the mock), and
    # checked the mapreduce attribute on the wrong module — both fixed to
    # test the attribute that is actually being set.
    if not hasattr(resource_manager_api, 'old_get_resource_manager'):
        resource_manager_api.old_get_resource_manager = resource_manager_api.get_resource_manager
    if not hasattr(mapreduce_api, 'old_get_mapreduce_api'):
        mapreduce_api.old_get_mapreduce_api = mapreduce_api.get_mapreduce_api
    if not hasattr(history_server_api, 'old_get_history_server_api'):
        history_server_api.old_get_history_server_api = history_server_api.get_history_server_api

    # Two non-superuser users, each with jobbrowser access.
    self.c = make_logged_in_client(is_superuser=False)
    grant_access("test", "test", "jobbrowser")
    self.user = User.objects.get(username='test')
    self.c2 = make_logged_in_client(is_superuser=False, username="test2")
    grant_access("test2", "test2", "jobbrowser")
    self.user2 = User.objects.get(username='test2')

    # Replace the real cluster API factories with in-process mocks.
    resource_manager_api.get_resource_manager = lambda user: MockResourceManagerApi(user)
    mapreduce_api.get_mapreduce_api = lambda: MockMapreduceApi()
    history_server_api.get_history_server_api = lambda: HistoryServerApi()

    # Undo hooks for the config flips; presumably run in tearDown.
    self.finish = [
        YARN_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True),
        SHARE_JOBS.set_for_testing(False)
    ]
    assert_true(cluster.is_yarn())
示例4: filter_jobs
def filter_jobs(self, user, jobs, **kwargs):
    """Return the subset of *jobs* that *user* is permitted to view.

    All jobs pass when sharing is enabled or the user is an admin;
    otherwise only the user's own jobs pass.
    """
    # Enforcement applies only when sharing is disabled and the user is
    # not an admin.
    enforce = not SHARE_JOBS.get() and not is_admin(user)

    def may_view(job):
        # The extra is_admin test mirrors the original predicate exactly.
        return not enforce or is_admin(user) or job.user == user.username

    return filter(may_view, jobs)
示例5: get_api
jobid = kwargs['job']
try:
job = get_api(request.user, request.jt).get_job(jobid=jobid)
except ApplicationNotRunning, e:
if e.job.get('state', '').lower() == 'accepted' and 'kill' in request.path:
rm_api = resource_manager_api.get_resource_manager(request.user)
job = Application(e.job, rm_api)
else:
# reverse() seems broken, using request.path but beware, it discards GET and POST info
return job_not_assigned(request, jobid, request.path)
except JobExpired, e:
raise PopupException(_('Job %s has expired.') % jobid, detail=_('Cannot be found on the History Server.'))
except Exception, e:
raise PopupException(_('Could not find job %s.') % jobid, detail=e)
if not SHARE_JOBS.get() and not request.user.is_superuser \
and job.user != request.user.username and not can_view_job(request.user.username, job):
raise PopupException(_("You don't have permission to access job %(id)s.") % {'id': jobid})
kwargs['job'] = job
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
def job_not_assigned(request, jobid, path):
if request.GET.get('format') == 'json':
result = {'status': -1, 'message': ''}
try:
get_api(request.user, request.jt).get_job(jobid=jobid)
result['status'] = 0
except ApplicationNotRunning, e:
示例6: test_job
def test_job(self):
"""
Test new job views.
The status of the jobs should be the same as the status reported back by oozie.
In this case, all jobs should succeed.
"""
# Run the sleep example, since it doesn't require user home directory
design_id = self.design.id
response = self.client.post(reverse('oozie:submit_workflow',
args=[design_id]),
data={u'form-MAX_NUM_FORMS': [u''],
u'form-INITIAL_FORMS': [u'1'],
u'form-0-name': [u'REDUCER_SLEEP_TIME'],
u'form-0-value': [u'1'],
u'form-TOTAL_FORMS': [u'1']},
follow=True)
oozie_jobid = response.context['oozie_workflow'].id
OozieServerProvider.wait_until_completion(oozie_jobid, timeout=120, step=1)
hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)
hadoop_job_id_short = views.get_shorter_id(hadoop_job_id)
# All jobs page and fetch job ID
# Taking advantage of the fact new jobs are at the top of the list!
response = self.client.get('/jobbrowser/jobs/')
assert_true(hadoop_job_id_short in response.content, response.content)
# Make sure job succeeded
response = self.client.get('/jobbrowser/jobs/?state=completed')
assert_true(hadoop_job_id_short in response.content)
response = self.client.get('/jobbrowser/jobs/?state=failed')
assert_false(hadoop_job_id_short in response.content)
response = self.client.get('/jobbrowser/jobs/?state=running')
assert_false(hadoop_job_id_short in response.content)
response = self.client.get('/jobbrowser/jobs/?state=killed')
assert_false(hadoop_job_id_short in response.content)
# Check sharing permissions
# Login as ourself
finish = SHARE_JOBS.set_for_testing(True)
try:
response = self.client.get('/jobbrowser/jobs/?user=')
assert_true(hadoop_job_id_short in response.content)
finally:
finish()
finish = SHARE_JOBS.set_for_testing(False)
try:
response = self.client.get('/jobbrowser/jobs/?user=')
assert_true(hadoop_job_id_short in response.content)
finally:
finish()
# Login as someone else
client_not_me = make_logged_in_client(username='not_me', is_superuser=False, groupname='test')
grant_access("not_me", "test", "jobbrowser")
finish = SHARE_JOBS.set_for_testing(True)
try:
response = client_not_me.get('/jobbrowser/jobs/?user=')
assert_true(hadoop_job_id_short in response.content)
finally:
finish()
finish = SHARE_JOBS.set_for_testing(False)
try:
response = client_not_me.get('/jobbrowser/jobs/?user=')
assert_false(hadoop_job_id_short in response.content)
finally:
finish()
# Single job page
response = self.client.get('/jobbrowser/jobs/%s' % hadoop_job_id)
# Check some counters for single job.
counters = response.context['job'].counters
counters_file_bytes_written = counters['org.apache.hadoop.mapreduce.FileSystemCounter']['counters']['FILE_BYTES_WRITTEN']
assert_true(counters_file_bytes_written['map'] > 0)
assert_true(counters_file_bytes_written['reduce'] > 0)
# We can't just check the complete contents of the python map because the
# SLOTS_MILLIS_* entries have a variable number of milliseconds from
# run-to-run.
assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['TOTAL_LAUNCHED_MAPS']['total'], 2L)
assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['TOTAL_LAUNCHED_REDUCES']['total'], 1L)
assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['FALLOW_SLOTS_MILLIS_MAPS']['total'], 0L)
assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['FALLOW_SLOTS_MILLIS_REDUCES']['total'], 0L)
assert_true(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['SLOTS_MILLIS_MAPS']['total'] > 0)
assert_true(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['SLOTS_MILLIS_REDUCES']['total'] > 0)
# There should be 4 tasks for this job: cleanup, setup, map, reduce
response = self.client.get('/jobbrowser/jobs/%s/tasks' % (hadoop_job_id,))
assert_true(len(response.context['page'].object_list), 4)
# Select by tasktype
response = self.client.get('/jobbrowser/jobs/%s/tasks?tasktype=reduce' % (hadoop_job_id,))
assert_true(len(response.context['page'].object_list), 1)
# Select by taskstate
response = self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=succeeded' % (hadoop_job_id,))
assert_true(len(response.context['page'].object_list), 4)
# Select by text
#.........这里部分代码省略.........
示例7: test_job
def test_job(self):
"""
Test new job views.
The status of the jobs should be the same as the status reported back by oozie.
In this case, all jobs should succeed.
"""
# Clone design
assert_equal(0, OozieDesign.objects.filter(owner__username=self.username).count())
self.client.post('/jobsub/clone_design/%d' % self.sleep_design_id)
assert_equal(1, OozieDesign.objects.filter(owner__username=self.username).count())
# Run the sleep example, since it doesn't require user home directory
design_id = OozieDesign.objects.get(owner__username=self.username).id
response = self.client.post("/jobsub/submit_design/%d" % (design_id,),
dict(map_sleep_time=1,
num_maps=1,
num_reduces=1,
reduce_sleep_time=1),
follow=True)
oozie_jobid = response.context['jobid']
job = OozieServerProvider.wait_until_completion(oozie_jobid, timeout=120, step=1)
hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)
hadoop_job_id_short = views.get_shorter_id(hadoop_job_id)
# All jobs page and fetch job ID
# Taking advantage of the fact new jobs are at the top of the list!
response = self.client.get('/jobbrowser/jobs/')
assert_true(hadoop_job_id_short in response.content)
# Make sure job succeeded
response = self.client.get('/jobbrowser/jobs/?state=completed')
assert_true(hadoop_job_id_short in response.content)
response = self.client.get('/jobbrowser/jobs/?state=failed')
assert_false(hadoop_job_id_short in response.content)
response = self.client.get('/jobbrowser/jobs/?state=running')
assert_false(hadoop_job_id_short in response.content)
response = self.client.get('/jobbrowser/jobs/?state=killed')
assert_false(hadoop_job_id_short in response.content)
# Check sharing permissions
# Login as ourself
finish = SHARE_JOBS.set_for_testing(True)
try:
response = self.client.get('/jobbrowser/jobs/?user=')
assert_true(hadoop_job_id_short in response.content)
finally:
finish()
finish = SHARE_JOBS.set_for_testing(False)
try:
response = self.client.get('/jobbrowser/jobs/?user=')
assert_true(hadoop_job_id_short in response.content)
finally:
finish()
# Login as someone else
client_not_me = make_logged_in_client(username='not_me', is_superuser=False, groupname='test')
grant_access("not_me", "test", "jobbrowser")
finish = SHARE_JOBS.set_for_testing(True)
try:
response = client_not_me.get('/jobbrowser/jobs/?user=')
assert_true(hadoop_job_id_short in response.content)
finally:
finish()
finish = SHARE_JOBS.set_for_testing(False)
try:
response = client_not_me.get('/jobbrowser/jobs/?user=')
assert_false(hadoop_job_id_short in response.content)
finally:
finish()
# Single job page
response = self.client.get('/jobbrowser/jobs/%s' % hadoop_job_id)
# Check some counters for single job.
counters = response.context['job'].counters
counters_file_bytes_written = counters['org.apache.hadoop.mapreduce.FileSystemCounter']['counters']['FILE_BYTES_WRITTEN']
assert_true(counters_file_bytes_written['map'] > 0)
assert_true(counters_file_bytes_written['reduce'] > 0)
# We can't just check the complete contents of the python map because the
# SLOTS_MILLIS_* entries have a variable number of milliseconds from
# run-to-run.
assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['TOTAL_LAUNCHED_MAPS']['total'], 1)
assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['TOTAL_LAUNCHED_REDUCES']['total'], 1)
assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['FALLOW_SLOTS_MILLIS_MAPS']['total'], 0)
assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['FALLOW_SLOTS_MILLIS_REDUCES']['total'], 0)
assert_true(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['SLOTS_MILLIS_MAPS']['total'] > 0)
assert_true(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['SLOTS_MILLIS_REDUCES']['total'] > 0)
# There should be 4 tasks for this job: cleanup, setup, map, reduce
response = self.client.get('/jobbrowser/jobs/%s/tasks' % (hadoop_job_id,))
assert_true(len(response.context['page'].object_list), 4)
# Select by tasktype
response = self.client.get('/jobbrowser/jobs/%s/tasks?tasktype=reduce' % (hadoop_job_id,))
assert_true(len(response.context['page'].object_list), 1)
# Select by taskstate
#.........这里部分代码省略.........
示例8: Application
except ApplicationNotRunning, e:
if e.job.get("state", "").lower() == "accepted" and "kill" in request.path:
rm_api = resource_manager_api.get_resource_manager(request.user)
job = Application(e.job, rm_api)
else:
# reverse() seems broken, using request.path but beware, it discards GET and POST info
return job_not_assigned(request, jobid, request.path)
except JobExpired, e:
raise PopupException(_("Job %s has expired.") % jobid, detail=_("Cannot be found on the History Server."))
except Exception, e:
msg = "Could not find job %s."
LOGGER.exception(msg % jobid)
raise PopupException(_(msg) % jobid, detail=e)
if (
not SHARE_JOBS.get()
and not request.user.is_superuser
and job.user != request.user.username
and not can_view_job(request.user.username, job)
):
raise PopupException(_("You don't have permission to access job %(id)s.") % {"id": jobid})
kwargs["job"] = job
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
def job_not_assigned(request, jobid, path):
if request.GET.get("format") == "json":
result = {"status": -1, "message": ""}