This page collects typical usage examples of the Python method radical.entk.Task.cpu_reqs. If you are unsure what Task.cpu_reqs does, how it is used, or what working code looks like, the curated examples below may help. You can also read further about the class the method belongs to, radical.entk.Task.
The following shows 4 code examples of the Task.cpu_reqs method, sorted by popularity by default.
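Before the examples, a minimal sketch of the attribute itself (not taken from any of the examples below; the executable and values are purely illustrative): cpu_reqs is assigned a dict using the four keys that appear throughout this page.

from radical.entk import Task

t = Task()
t.executable = ['sleep']                          # illustrative executable
t.arguments  = ['10']
t.cpu_reqs   = {'processes'          : 2,         # number of processes for the task
                'process_type'       : None,      # e.g. 'MPI', or None for non-MPI
                'threads_per_process': 4,         # threads spawned by each process
                'thread_type'        : 'OpenMP'}  # e.g. 'OpenMP', or None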
Example 1: test_issue_239
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import cpu_reqs [as alias]
def test_issue_239():

    t = Task()
    t.cpu_reqs = {'processes': 1}
    assert t.cpu_reqs == {'processes': 1,
                          'thread_type': None,
                          'threads_per_process': 1,
                          'process_type': None}

    t.cpu_reqs = {'threads_per_process': 1}
    assert t.cpu_reqs == {'processes': 1,
                          'thread_type': None,
                          'threads_per_process': 1,
                          'process_type': None}

    t.gpu_reqs = {'processes': 1}
    assert t.gpu_reqs == {'processes': 1,
                          'thread_type': None,
                          'threads_per_process': 1,
                          'process_type': None}

    t.gpu_reqs = {'threads_per_process': 1}
    assert t.gpu_reqs == {'processes': 1,
                          'thread_type': None,
                          'threads_per_process': 1,
                          'process_type': None}
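A quick hedged check of what Example 1 asserts: a partial dict only overrides the keys it names, while the remaining keys keep the defaults shown above (the value 8 here is arbitrary).

from radical.entk import Task

t = Task()
t.cpu_reqs = {'threads_per_process': 8}    # partial assignment
print(t.cpu_reqs['threads_per_process'])   # 8
print(t.cpu_reqs['processes'])             # 1, the default seen in Example 1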
Example 2: init_cycle
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import cpu_reqs [as alias]
def init_cycle(self, replicas, replica_cores, python_path, md_executable,
               exchange_method, min_temp, max_temp, timesteps, basename,
               pre_exec):  # "cycle" = 1 MD stage plus the subsequent exchange computation
    """
    Initial cycle consists of:
    1) Create tarball of MD input data
    2) Transfer the tarball to pilot sandbox
    3) Untar the tarball
    4) Run first cycle
    """

    # Initialize pipeline
    self._prof.prof('InitTar', uid=self._uid)
    p = Pipeline()
    p.name = 'initpipeline'

    md_dict = dict()    # bookkeeping
    tar_dict = dict()   # bookkeeping

    # Write the input files
    self._prof.prof('InitWriteInputs', uid=self._uid)
    writeInputs.writeInputs(max_temp=max_temp,
                            min_temp=min_temp,
                            replicas=replicas,
                            timesteps=timesteps,
                            basename=basename)
    self._prof.prof('EndWriteInputs', uid=self._uid)

    self._prof.prof('InitTar', uid=self._uid)
    # Create tarball of input data
    tar = tarfile.open("input_files.tar", "w")
    for name in [basename + ".prmtop",
                 basename + ".inpcrd",
                 basename + ".mdin"]:
        tar.add(name)
    for r in range(replicas):
        tar.add('mdin_{0}'.format(r))
    tar.close()

    # Delete all input files outside the tarball
    for r in range(replicas):
        os.remove('mdin_{0}'.format(r))
    self._prof.prof('EndTar', uid=self._uid)

    # Create untar stage
    repo = git.Repo('.', search_parent_directories=True)
    aux_function_path = repo.working_tree_dir

    untar_stg = Stage()
    untar_stg.name = 'untarStg'

    # Untar task
    untar_tsk = Task()
    untar_tsk.name = 'untartsk'
    untar_tsk.executable = ['python']
    untar_tsk.upload_input_data = [
        str(aux_function_path) + '/repex/untar_input_files.py',
        'input_files.tar'
    ]
    untar_tsk.arguments = ['untar_input_files.py', 'input_files.tar']
    untar_tsk.cpu_reqs = {'processes': 1}   # a partial dict is filled with defaults (cf. Example 1)
    # untar_tsk.post_exec = ['']
    untar_stg.add_tasks(untar_tsk)
    p.add_stages(untar_stg)

    tar_dict[0] = '$Pipeline_%s_Stage_%s_Task_%s' % (p.name,
                                                     untar_stg.name,
                                                     untar_tsk.name)

    # First MD stage: needs to be defined separately since the workflow is not
    # built from a predetermined order; equilibration also needs to happen first.
    md_stg = Stage()
    md_stg.name = 'mdstg0'
    self._prof.prof('InitMD_0', uid=self._uid)

    # MD tasks
    for r in range(replicas):
        md_tsk = AMBERTask(cores=replica_cores, md_executable=md_executable,
                           pre_exec=pre_exec)
        md_tsk.name = 'mdtsk-{replica}-{cycle}'.format(replica=r, cycle=0)
        md_tsk.link_input_data += [
            '%s/inpcrd' % tar_dict[0],
            '%s/prmtop' % tar_dict[0],
            '%s/mdin_{0}'.format(r) % tar_dict[0]   # use for full temperature exchange
        ]
        md_tsk.arguments = [
            '-O',
            '-p', 'prmtop',
            '-i', 'mdin_{0}'.format(r),
#......... part of the code omitted here .........
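Example 2 also relies on an AMBERTask helper and a writeInputs module that are not part of the excerpt. The sketch below is a hypothetical stand-in for such a task factory, written only to make the excerpt easier to follow; the name and signature mirror the call above, but the body and requirement values are assumptions, not the project's actual code.

from radical.entk import Task

def AMBERTask(cores, md_executable, pre_exec):     # hypothetical factory mirroring the call in Example 2
    t = Task()
    t.executable = [md_executable]
    t.pre_exec   = pre_exec
    t.cpu_reqs   = {'processes'          : cores,  # assumed: 'cores' maps to MPI processes
                    'process_type'       : 'MPI',
                    'threads_per_process': 1,
                    'thread_type'        : None}
    return t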
Example 3: test_task_exceptions
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import cpu_reqs [as alias]
def test_task_exceptions(s, l, i, b):
    """
    **Purpose**: Test if all attribute assignments raise exceptions for invalid values
    """

    t = Task()

    data_type = [s, l, i, b]

    for data in data_type:

        if not isinstance(data, str):
            with pytest.raises(TypeError):
                t.name = data
            with pytest.raises(TypeError):
                t.path = data
            with pytest.raises(TypeError):
                t.parent_stage = data
            with pytest.raises(TypeError):
                t.parent_pipeline = data
            with pytest.raises(TypeError):
                t.stdout = data
            with pytest.raises(TypeError):
                t.stderr = data

        if not isinstance(data, list):
            with pytest.raises(TypeError):
                t.pre_exec = data
            with pytest.raises(TypeError):
                t.arguments = data
            with pytest.raises(TypeError):
                t.post_exec = data
            with pytest.raises(TypeError):
                t.upload_input_data = data
            with pytest.raises(TypeError):
                t.copy_input_data = data
            with pytest.raises(TypeError):
                t.link_input_data = data
            with pytest.raises(TypeError):
                t.move_input_data = data
            with pytest.raises(TypeError):
                t.copy_output_data = data
            with pytest.raises(TypeError):
                t.download_output_data = data
            with pytest.raises(TypeError):
                t.move_output_data = data

        if not isinstance(data, str) and not isinstance(data, list):
            with pytest.raises(TypeError):
                t.executable = data

        # note: the 'unicode' builtin exists only on Python 2
        if not isinstance(data, str) and not isinstance(data, unicode):
            with pytest.raises(ValueError):
                t.cpu_reqs = {
                    'processes': 1,
                    'process_type': data,
                    'threads_per_process': 1,
                    'thread_type': None
                }
                t.cpu_reqs = {
                    'processes': 1,
                    'process_type': None,
                    'threads_per_process': 1,
                    'thread_type': data
                }
                t.gpu_reqs = {
                    'processes': 1,
                    'process_type': data,
                    'threads_per_process': 1,
                    'thread_type': None
                }
                t.gpu_reqs = {
                    'processes': 1,
                    'process_type': None,
                    'threads_per_process': 1,
                    'thread_type': data
                }

        if not isinstance(data, int):
            with pytest.raises(TypeError):
                t.cpu_reqs = {
#......... part of the code omitted here .........
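The excerpt does not show how the parameters s, l, i and b are supplied to test_task_exceptions. One hedged way to drive it by hand is to pass one value per expected type, so every branch is exercised; note that the test references the unicode builtin and therefore assumes Python 2.

# arbitrary illustrative values: a string, a list, an integer and a boolean
test_task_exceptions(s='some_string', l=['a', 'list'], i=42, b=True)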
Example 4: test_create_cud_from_task
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import cpu_reqs [as alias]
def test_create_cud_from_task():
    """
    **Purpose**: Test if the 'create_cud_from_task' function generates a RP ComputeUnitDescription
    with the complete Task description
    """

    pipeline = 'p1'
    stage = 's1'
    task = 't1'

    placeholder_dict = {
        pipeline: {
            stage: {
                task: '/home/vivek/some_file.txt'
            }
        }
    }

    t1 = Task()
    t1.name = 't1'
    t1.pre_exec = ['module load gromacs']
    t1.executable = ['grompp']
    t1.arguments = ['hello']
    t1.cpu_reqs = {'processes': 4,
                   'process_type': 'MPI',
                   'threads_per_process': 1,
                   'thread_type': 'OpenMP'}
    t1.gpu_reqs = {'processes': 4,
                   'process_type': 'MPI',
                   'threads_per_process': 2,
                   'thread_type': 'OpenMP'}
    t1.post_exec = ['echo test']

    t1.upload_input_data = ['upload_input.dat']
    t1.copy_input_data = ['copy_input.dat']
    t1.link_input_data = ['link_input.dat']
    t1.copy_output_data = ['copy_output.dat']
    t1.download_output_data = ['download_output.dat']

    p = Pipeline()
    p.name = 'p1'
    s = Stage()
    s.name = 's1'
    s.tasks = t1
    p.stages = s
    p._assign_uid('test')

    cud = create_cud_from_task(t1, placeholder_dict)

    assert cud.name == '%s,%s,%s,%s,%s,%s' % (t1.uid, t1.name,
                                              t1.parent_stage['uid'], t1.parent_stage['name'],
                                              t1.parent_pipeline['uid'], t1.parent_pipeline['name'])
    assert cud.pre_exec == t1.pre_exec

    # rp returns executable as a string regardless of whether assignment was using string or list
    assert cud.executable == t1.executable
    assert cud.arguments == t1.arguments
    assert cud.cpu_processes == t1.cpu_reqs['processes']
    assert cud.cpu_threads == t1.cpu_reqs['threads_per_process']
    assert cud.cpu_process_type == t1.cpu_reqs['process_type']
    assert cud.cpu_thread_type == t1.cpu_reqs['thread_type']
    assert cud.gpu_processes == t1.gpu_reqs['processes']
    assert cud.gpu_threads == t1.gpu_reqs['threads_per_process']
    assert cud.gpu_process_type == t1.gpu_reqs['process_type']
    assert cud.gpu_thread_type == t1.gpu_reqs['thread_type']
    assert cud.post_exec == t1.post_exec

    assert {'source': 'upload_input.dat', 'target': 'upload_input.dat'} in cud.input_staging
    assert {'source': 'copy_input.dat', 'action': rp.COPY, 'target': 'copy_input.dat'} in cud.input_staging
    assert {'source': 'link_input.dat', 'action': rp.LINK, 'target': 'link_input.dat'} in cud.input_staging
    assert {'source': 'copy_output.dat', 'action': rp.COPY, 'target': 'copy_output.dat'} in cud.output_staging
    assert {'source': 'download_output.dat', 'target': 'download_output.dat'} in cud.output_staging
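As a follow-up to the name assertion above, a hedged helper (not part of the test) that splits the packed CUD name back into its components, assuming exactly the comma-separated format asserted in Example 4:

def split_cud_name(cud_name):
    # hypothetical helper, based only on the '%s,%s,%s,%s,%s,%s' format asserted above
    uid, name, stage_uid, stage_name, pipe_uid, pipe_name = cud_name.split(',')
    return {'task'    : {'uid': uid,       'name': name},
            'stage'   : {'uid': stage_uid, 'name': stage_name},
            'pipeline': {'uid': pipe_uid,  'name': pipe_name}}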