This article collects typical usage examples of the Python method common.CpuSnapshot.free_processors_available_at. If you are wondering how to call CpuSnapshot.free_processors_available_at, what it is used for, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the containing class, common.CpuSnapshot.
The following shows 8 code examples of the CpuSnapshot.free_processors_available_at method, sorted by popularity by default.
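Before the individual examples, here is a minimal sketch of the pattern they all share: query free_processors_available_at for the processors free at the current time, and only then commit the job with assignJob. The helper below is illustrative only; it assumes a CpuSnapshot-like object exposing those two methods and job objects exposing num_required_processors, mirroring the examples on this page rather than any documented API.
def start_jobs_that_fit_now(snapshot, waiting_jobs, current_time):
    """Pop jobs off the head of a FIFO waiting list for as long as they fit right now."""
    started = []
    while waiting_jobs:
        head = waiting_jobs[0]
        # Stop at the first job that does not fit, preserving FCFS order.
        if snapshot.free_processors_available_at(current_time) < head.num_required_processors:
            break
        snapshot.assignJob(waiting_jobs.pop(0), current_time)
        started.append(head)
    return started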
Example 1: HeadDoubleEasyScheduler
# Required import: from common import CpuSnapshot [as alias]
# Or: from common.CpuSnapshot import free_processors_available_at [as alias]
class HeadDoubleEasyScheduler(EasyBackfillScheduler):
def __init__(self, num_processors):
super(HeadDoubleEasyScheduler, self).__init__(num_processors)
self.cpu_snapshot = CpuSnapshot(num_processors)
def _schedule_head_of_list(self, current_time):
"Overriding parent method"
result = []
while True:
if len(self.unscheduled_jobs) == 0:
break
# check if the first job can be scheduled at current time
if (
self.cpu_snapshot.free_processors_available_at(current_time)
>= self.unscheduled_jobs[0].num_required_processors
):
job = self.unscheduled_jobs.pop(0)
job.predicted_run_time = 2 * job.user_estimated_run_time # doubling is done here
self.cpu_snapshot.assignJob(job, current_time)
result.append(job)
else:
# first job can't be scheduled
break
return result
Example 2: FcfsScheduler
# Required import: from common import CpuSnapshot [as alias]
# Or: from common.CpuSnapshot import free_processors_available_at [as alias]
class FcfsScheduler(Scheduler):
def __init__(self, options):
super(FcfsScheduler, self).__init__(options)
self.cpu_snapshot = CpuSnapshot(self.num_processors, options["stats"])
self.waiting_queue_of_jobs = []
def new_events_on_job_submission(self, job, current_time):
self.cpu_snapshot.archive_old_slices(current_time)
self.waiting_queue_of_jobs.append(job)
return [
JobStartEvent(current_time, job)
for job in self._schedule_jobs(current_time)
]
def new_events_on_job_termination(self, job, current_time):
self.cpu_snapshot.archive_old_slices(current_time)
self.cpu_snapshot.delTailofJobFromCpuSlices(job)
return [
JobStartEvent(current_time, job)
for job in self._schedule_jobs(current_time)
]
def _schedule_jobs(self, current_time):
result = []
while len(self.waiting_queue_of_jobs) > 0:
job = self.waiting_queue_of_jobs[0]
if self.cpu_snapshot.free_processors_available_at(current_time) >= job.num_required_processors:
self.waiting_queue_of_jobs.pop(0)
self.cpu_snapshot.assignJob(job, current_time)
result.append(job)
else:
break
return result
Example 3: LogScheduler
# Required import: from common import CpuSnapshot [as alias]
# Or: from common.CpuSnapshot import free_processors_available_at [as alias]
class LogScheduler(Scheduler):
def __init__(self, num_processors):
super(LogScheduler, self).__init__(num_processors)
self.cpu_snapshot = CpuSnapshot(num_processors)
self.waiting_queue_of_jobs = []
def new_events_on_job_submission(self, job, current_time):
self.cpu_snapshot.archive_old_slices(current_time)
result = []
#self.waiting_queue_of_jobs.append(job)
result.append(JobStartEvent(current_time+job.actual_wait_time, job))
#return [
# JobStartEvent(current_time, job)
# for job in self._log_schedule_jobs(current_time)
#]
return result
def new_events_on_job_termination(self, job, current_time):
self.cpu_snapshot.archive_old_slices(current_time)
self.cpu_snapshot.delTailofJobFromCpuSlices(job)
return []
"""return [
JobStartEvent(current_time, job)
for job in self._log_schedule_jobs(current_time)
]"""
def _schedule_jobs(self, current_time):
result = []
while len(self.waiting_queue_of_jobs) > 0:
job = self.waiting_queue_of_jobs[0]
if self.cpu_snapshot.free_processors_available_at(current_time) >= job.num_required_processors:
self.waiting_queue_of_jobs.pop(0)
self.cpu_snapshot.assignJob(job, current_time)
result.append(job)
else:
break
return result
def _log_schedule_jobs(self, current_time): #by Siddharth
result = []
return result
"""while len(self.waiting_queue_of_jobs) > 0:
Developer ID: kruthikavishwanath, Project: project-to-predict-wait-times-in-tyrone-cluster, Lines of code: 46, Source: log_scheduler.py
Example 4: EasyBackfillScheduler
# Required import: from common import CpuSnapshot [as alias]
# Or: from common.CpuSnapshot import free_processors_available_at [as alias]
class EasyBackfillScheduler(Scheduler):
def __init__(self, options):
super(EasyBackfillScheduler, self).__init__(options)
self.cpu_snapshot = CpuSnapshot(self.num_processors, options["stats"])
self.unscheduled_jobs = []
def new_events_on_job_submission(self, just_submitted_job, current_time):
""" Here we first add the new job to the waiting list. We then try to schedule
the jobs in the waiting list, returning a collection of new termination events """
# TODO: a probable performance bottleneck because we reschedule all the
# jobs. Knowing that only one new job is added allows more efficient
# scheduling here.
self.cpu_snapshot.archive_old_slices(current_time)
self.unscheduled_jobs.append(just_submitted_job)
retl = []
if (self.cpu_snapshot.free_processors_available_at(current_time) >= just_submitted_job.num_required_processors):
for job in self._schedule_jobs(current_time):
retl.append(JobStartEvent(current_time, job))
return retl
def new_events_on_job_termination(self, job, current_time):
""" Here we first delete the tail of the just terminated job (in case it's
done before user estimation time). We then try to schedule the jobs in the waiting list,
returning a collection of new termination events """
self.cpu_snapshot.archive_old_slices(current_time)
self.cpu_snapshot.delTailofJobFromCpuSlices(job)
return [
JobStartEvent(current_time, job)
for job in self._schedule_jobs(current_time)
]
def _schedule_jobs(self, current_time):
"Schedules jobs that can run right now, and returns them"
jobs = self._schedule_head_of_list(current_time)
jobs += self._backfill_jobs(current_time)
return jobs
def _schedule_head_of_list(self, current_time):
result = []
while True:
if len(self.unscheduled_jobs) == 0:
break
# Try to schedule the first job
if self.cpu_snapshot.free_processors_available_at(current_time) >= self.unscheduled_jobs[0].num_required_processors:
job = self.unscheduled_jobs.pop(0)
self.cpu_snapshot.assignJob(job, current_time)
result.append(job)
else:
# first job can't be scheduled
break
return result
def _backfill_jobs(self, current_time):
"""
Find jobs that can be backfilled and update the cpu snapshot.
DEPRECATED FUNCTION !!!!!!
"""
if len(self.unscheduled_jobs) <= 1:
return []
result = []
tail_of_waiting_list = list_copy(self.unscheduled_jobs[1:])
first_job = self.unscheduled_jobs[0]
self.cpu_snapshot.assignJobEarliest(first_job, current_time)
for job in tail_of_waiting_list:
if self.cpu_snapshot.canJobStartNow(job, current_time):
job.is_backfilled = 1
self.unscheduled_jobs.remove(job)
self.cpu_snapshot.assignJob(job, current_time)
result.append(job)
self.cpu_snapshot.unAssignJob(first_job)
return result
Example 5: EasyPlusPlusScheduler
# Required import: from common import CpuSnapshot [as alias]
# Or: from common.CpuSnapshot import free_processors_available_at [as alias]
class EasyPlusPlusScheduler(Scheduler):
""" This algorithm implements the algorithm in the paper of Tsafrir, Etzion, Feitelson, june 2007?
"""
I_NEED_A_PREDICTOR = True
def __init__(self, options):
super(EasyPlusPlusScheduler, self).__init__(options)
self.init_predictor(options)
self.init_corrector(options)
self.cpu_snapshot = CpuSnapshot(self.num_processors, options["stats"])
self.unscheduled_jobs = []
def new_events_on_job_submission(self, job, current_time):
self.cpu_snapshot.archive_old_slices(current_time)
self.predictor.predict(job, current_time, self.running_jobs)
if not hasattr(job,"initial_prediction"):
job.initial_prediction=job.predicted_run_time
self.unscheduled_jobs.append(job)
return [
JobStartEvent(current_time, job)
for job in self._schedule_jobs(current_time)
]
def new_events_on_job_termination(self, job, current_time):
self.predictor.fit(job, current_time)
if self.corrector.__name__=="ninetynine":
self.pestimator.fit(job.actual_run_time/job.user_estimated_run_time)
self.cpu_snapshot.archive_old_slices(current_time)
self.cpu_snapshot.delTailofJobFromCpuSlices(job)
return [
JobStartEvent(current_time, job)
for job in self._schedule_jobs(current_time)
]
def new_events_on_job_under_prediction(self, job, current_time):
pass #assert job.predicted_run_time <= job.user_estimated_run_time
if not hasattr(job,"num_underpredict"):
job.num_underpredict = 0
else:
job.num_underpredict += 1
if self.corrector.__name__=="ninetynine":
new_predicted_run_time = self.corrector(self.pestimator,job,current_time)
else:
new_predicted_run_time = self.corrector(job, current_time)
#set the new predicted runtime
self.cpu_snapshot.assignTailofJobToTheCpuSlices(job, new_predicted_run_time)
job.predicted_run_time = new_predicted_run_time
return [JobStartEvent(current_time, job)]
def _schedule_jobs(self, current_time):
"Schedules jobs that can run right now, and returns them"
jobs = self._schedule_head_of_list(current_time)
jobs += self._backfill_jobs(current_time)
return jobs
def _schedule_head_of_list(self, current_time):
result = []
while True:
if len(self.unscheduled_jobs) == 0:
break
# Try to schedule the first job
if self.cpu_snapshot.free_processors_available_at(current_time) >= self.unscheduled_jobs[0].num_required_processors:
job = self.unscheduled_jobs.pop(0)
self.cpu_snapshot.assignJob(job, current_time)
result.append(job)
else:
# first job can't be scheduled
break
return result
def _backfill_jobs(self, current_time):
if len(self.unscheduled_jobs) <= 1:
return []
result = []
first_job = self.unscheduled_jobs[0]
tail = list_copy(self.unscheduled_jobs[1:])
tail_of_jobs_by_sjf_order = sorted(tail, key=sjf_sort_key)
self.cpu_snapshot.assignJobEarliest(first_job, current_time)
for job in tail_of_jobs_by_sjf_order:
if self.cpu_snapshot.canJobStartNow(job, current_time):
job.is_backfilled = 1
#......... part of the code is omitted here .........
Example 6: OrigProbabilisticEasyScheduler
# Required import: from common import CpuSnapshot [as alias]
# Or: from common.CpuSnapshot import free_processors_available_at [as alias]
class OrigProbabilisticEasyScheduler(Scheduler):
""" This algorithm implements a version of Feitelson and Nissimov, June 2007
"""
def __init__(self, num_processors, threshold = 0.2, window_size=150):
super(OrigProbabilisticEasyScheduler, self).__init__(num_processors)
self.threshold = threshold
self.window_size = window_size # a parameter for the distribution
self.cpu_snapshot = CpuSnapshot(num_processors)
self.user_distribution = {}
self.unscheduled_jobs = []
self.currently_running_jobs = []
#self.work_list = [[None for i in xrange(self.num_processors+1)] for j in xrange(self.num_processors+1)]
self.M = {}
for c in xrange(self.num_processors+1):
for n in xrange(self.num_processors+1):
self.M[c, n] = 0.0
self.max_user_rounded_estimated_run_time = 0
self.prev_max_user_rounded_estimated_run_time = 0
def new_events_on_job_submission(self, job, current_time):
# print "arrived:", job
rounded_up_estimated_time = _round_time_up(job.user_estimated_run_time)
if rounded_up_estimated_time > self.max_user_rounded_estimated_run_time:
self.prev_max_user_rounded_estimated_run_time = self.max_user_rounded_estimated_run_time
self.max_user_rounded_estimated_run_time = rounded_up_estimated_time
if not self.user_distribution.has_key(job.user_id):
self.user_distribution[job.user_id] = Distribution(job, self.window_size)
self.user_distribution[job.user_id].touch(2*self.max_user_rounded_estimated_run_time)
if self.prev_max_user_rounded_estimated_run_time < self.max_user_rounded_estimated_run_time:
for tmp_job in self.currently_running_jobs:
self.user_distribution[tmp_job.user_id].touch(2*self.max_user_rounded_estimated_run_time)
self.cpu_snapshot.archive_old_slices(current_time)
self.unscheduled_jobs.append(job)
return [
JobStartEvent(current_time, job)
for job in self._schedule_jobs(current_time)
]
def new_events_on_job_termination(self, job, current_time):
self.user_distribution[job.user_id].add_job(job)
self.currently_running_jobs.remove(job)
self.cpu_snapshot.archive_old_slices(current_time)
self.cpu_snapshot.delTailofJobFromCpuSlices(job)
return [
JobStartEvent(current_time, job)
for job in self._schedule_jobs(current_time)
]
def _schedule_jobs(self, current_time):
"Schedules jobs that can run right now, and returns them"
jobs = self._schedule_head_of_list(current_time)
jobs += self._backfill_jobs(current_time)
return jobs
def _schedule_head_of_list(self, current_time):
result = []
while True:
if len(self.unscheduled_jobs) == 0:
break
# Try to schedule the first job
if self.cpu_snapshot.free_processors_available_at(current_time) >= self.unscheduled_jobs[0].num_required_processors:
job = self.unscheduled_jobs.pop(0)
self.currently_running_jobs.append(job)
self.cpu_snapshot.assignJob(job, current_time)
result.append(job)
else:
# first job can't be scheduled
break
return result
def _backfill_jobs(self, current_time):
if len(self.unscheduled_jobs) <= 1:
return []
result = []
first_job = self.unscheduled_jobs[0]
tail = list_copy(self.unscheduled_jobs[1:])
for job in tail:
if self.can_be_probabilistically_backfilled(job, current_time):
self.unscheduled_jobs.remove(job)
self.currently_running_jobs.append(job)
self.cpu_snapshot.assignJob(job, current_time)
result.append(job)
return result
#......... part of the code is omitted here .........
Example 7: EasyPlusPlusScheduler
# Required import: from common import CpuSnapshot [as alias]
# Or: from common.CpuSnapshot import free_processors_available_at [as alias]
class EasyPlusPlusScheduler(Scheduler):
""" This algorithm implements the algorithm in the paper of Tsafrir, Etzion, Feitelson, june 2007?
"""
def __init__(self, num_processors):
super(EasyPlusPlusScheduler, self).__init__(num_processors)
self.cpu_snapshot = CpuSnapshot(num_processors)
self.unscheduled_jobs = []
self.user_run_time_prev = {}
self.user_run_time_last = {}
def new_events_on_job_submission(self, job, current_time):
if not self.user_run_time_last.has_key(job.user_id):
self.user_run_time_prev[job.user_id] = None
self.user_run_time_last[job.user_id] = None
self.cpu_snapshot.archive_old_slices(current_time)
self.unscheduled_jobs.append(job)
return [
JobStartEvent(current_time, job)
for job in self._schedule_jobs(current_time)
]
def new_events_on_job_termination(self, job, current_time):
assert self.user_run_time_last.has_key(job.user_id) == True
assert self.user_run_time_prev.has_key(job.user_id) == True
self.user_run_time_prev[job.user_id] = self.user_run_time_last[job.user_id]
self.user_run_time_last[job.user_id] = job.actual_run_time
self.cpu_snapshot.archive_old_slices(current_time)
self.cpu_snapshot.delTailofJobFromCpuSlices(job)
return [
JobStartEvent(current_time, job)
for job in self._schedule_jobs(current_time)
]
def new_events_on_job_under_prediction(self, job, current_time):
assert job.predicted_run_time <= job.user_estimated_run_time
self.cpu_snapshot.assignTailofJobToTheCpuSlices(job)
job.predicted_run_time = job.user_estimated_run_time
return []
def _schedule_jobs(self, current_time):
"Schedules jobs that can run right now, and returns them"
for job in self.unscheduled_jobs:
if self.user_run_time_prev[job.user_id] != None:
average = int((self.user_run_time_last[job.user_id] + self.user_run_time_prev[job.user_id])/ 2)
job.predicted_run_time = min (job.user_estimated_run_time, average)
jobs = self._schedule_head_of_list(current_time)
jobs += self._backfill_jobs(current_time)
return jobs
def _schedule_head_of_list(self, current_time):
result = []
while True:
if len(self.unscheduled_jobs) == 0:
break
# Try to schedule the first job
if self.cpu_snapshot.free_processors_available_at(current_time) >= self.unscheduled_jobs[0].num_required_processors:
job = self.unscheduled_jobs.pop(0)
self.cpu_snapshot.assignJob(job, current_time)
result.append(job)
else:
# first job can't be scheduled
break
return result
def _backfill_jobs(self, current_time):
if len(self.unscheduled_jobs) <= 1:
return []
result = []
first_job = self.unscheduled_jobs[0]
tail = list_copy(self.unscheduled_jobs[1:])
tail_of_jobs_by_sjf_order = sorted(tail, key=sjf_sort_key)
self.cpu_snapshot.assignJobEarliest(first_job, current_time)
for job in tail_of_jobs_by_sjf_order:
if self.cpu_snapshot.canJobStartNow(job, current_time):
self.unscheduled_jobs.remove(job)
self.cpu_snapshot.assignJob(job, current_time)
result.append(job)
self.cpu_snapshot.delJobFromCpuSlices(first_job)
return result
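The prediction rule in Example 7 averages the user's last two actual runtimes and caps the result at the user's own estimate. The helper below restates that rule on plain numbers; the function name is illustrative and not part of the common module, and the fallback to the raw estimate when there is no history is an assumption (the original simply leaves the prediction untouched in that case).
def predict_run_time(user_estimated_run_time, last_run_time, prev_run_time):
    # No history for this user yet: fall back to the user's own estimate (assumption).
    if last_run_time is None or prev_run_time is None:
        return user_estimated_run_time
    average = int((last_run_time + prev_run_time) / 2)
    # Never predict more than the user asked for, matching _schedule_jobs above.
    return min(user_estimated_run_time, average)

# For example, with an estimate of 3600s and previous runs of 1000s and 1400s,
# the predicted runtime would be min(3600, 1200) = 1200 seconds.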
Example 8: EasyBackfillScheduler
# Required import: from common import CpuSnapshot [as alias]
# Or: from common.CpuSnapshot import free_processors_available_at [as alias]
class EasyBackfillScheduler(Scheduler):
def __init__(self, num_processors):
super(EasyBackfillScheduler, self).__init__(num_processors)
self.cpu_snapshot = CpuSnapshot(num_processors)
        self.unscheduled_jobs = []
        self.totalScheduledJobs = 0  # initialize the counter that _schedule_jobs increments below
def new_events_on_job_submission(self, just_submitted_job, current_time):
""" Here we first add the new job to the waiting list. We then try to schedule
the jobs in the waiting list, returning a collection of new termination events """
# TODO: a probable performance bottleneck because we reschedule all the
# jobs. Knowing that only one new job is added allows more efficient
# scheduling here.
#print 'User submits', just_submitted_job
self.cpu_snapshot.archive_old_slices(current_time)
self.unscheduled_jobs.append(just_submitted_job)
#print 'At time', current_time, 'unscheduled:', self.unscheduled_jobs
return [
JobStartEvent(current_time, job)
for job in self._schedule_jobs(current_time)
]
def new_events_on_job_termination(self, job, current_time):
""" Here we first delete the tail of the just terminated job (in case it's
done before user estimation time). We then try to schedule the jobs in the waiting list,
returning a collection of new termination events """
self.cpu_snapshot.archive_old_slices(current_time)
self.cpu_snapshot.delTailofJobFromCpuSlices(job)
return [
JobStartEvent(current_time, job)
for job in self._schedule_jobs(current_time)
]
def _schedule_jobs(self, current_time):
"Schedules jobs that can run right now, and returns them"
jobs = self._schedule_head_of_list(current_time)
jobs += self._backfill_jobs(current_time)
#print 'Currently schedulable jobs:', jobs
#print 'Currently schedulable jobs count:', len(jobs)
self.totalScheduledJobs += len(jobs)
return jobs
def _schedule_head_of_list(self, current_time):
result = []
while True:
if len(self.unscheduled_jobs) == 0:
break
# Try to schedule the first job
if self.cpu_snapshot.free_processors_available_at(current_time) >= self.unscheduled_jobs[0].num_required_processors:
job = self.unscheduled_jobs.pop(0)
self.cpu_snapshot.assignJob(job, current_time)
result.append(job)
else:
# first job can't be scheduled
break
return result
def _backfill_jobs(self, current_time):
"""
Find jobs that can be backfilled and update the cpu snapshot.
"""
if len(self.unscheduled_jobs) <= 1:
return []
result = []
tail_of_waiting_list = list_copy(self.unscheduled_jobs[1:])
for job in tail_of_waiting_list:
if self.canBeBackfilled(job, current_time):
self.unscheduled_jobs.remove(job)
self.cpu_snapshot.assignJob(job, current_time)
result.append(job)
return result
def canBeBackfilled(self, second_job, current_time):
assert len(self.unscheduled_jobs) >= 2
assert second_job in self.unscheduled_jobs[1:]
if self.cpu_snapshot.free_processors_available_at(current_time) < second_job.num_required_processors:
return False
first_job = self.unscheduled_jobs[0]
temp_cpu_snapshot = self.cpu_snapshot.copy()
temp_cpu_snapshot.assignJobEarliest(first_job, current_time)
# if true, this means that the 2nd job is "independent" of the 1st, and thus can be backfilled
return temp_cpu_snapshot.canJobStartNow(second_job, current_time)
Developer ID: kruthikavishwanath, Project: project-to-predict-wait-times-in-tyrone-cluster, Lines of code: 93, Source: easy_scheduler.py
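The canBeBackfilled test in Example 8 is the heart of EASY backfilling: a waiting job may jump the queue only if it fits right now and does not delay the head job's earliest possible reservation. The restatement below is a sketch that assumes the same CpuSnapshot-like interface (free_processors_available_at, copy, assignJobEarliest, canJobStartNow) used in the examples above; it is not taken from the common module itself.
def can_backfill(snapshot, head_job, candidate_job, current_time):
    # The candidate must fit on the processors that are free right now.
    if snapshot.free_processors_available_at(current_time) < candidate_job.num_required_processors:
        return False
    # Reserve the head job at its earliest possible start on a throwaway copy,
    # then ask whether the candidate could still start immediately alongside it.
    trial = snapshot.copy()
    trial.assignJobEarliest(head_job, current_time)
    return trial.canJobStartNow(candidate_job, current_time)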