This article collects typical usage examples of the cpu_reqs['threads_per_process'] attribute of Python's radical.entk.Task class. If you have been wondering what exactly Task.cpu_reqs['threads_per_process'] does, how to use it, or what it looks like in practice, the curated examples below may help. You can also explore other usage of the containing class, radical.entk.Task.
The following shows 3 code examples involving Task.cpu_reqs['threads_per_process'], sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
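Before the full examples, here is a minimal sketch of the attribute itself. cpu_reqs is a dictionary on each Task describing its CPU footprint, and threads_per_process sets how many threads (typically cores) each process may use. The executable name below is a hypothetical placeholder, and the thread/process type values are illustrative:

from radical.entk import Task

t = Task()
t.executable = 'stress'                  # hypothetical executable, for illustration only
t.cpu_reqs['processes'] = 1              # number of processes to launch
t.cpu_reqs['threads_per_process'] = 4    # threads (cores) available to each process
t.cpu_reqs['thread_type'] = 'OpenMP'     # thread flavor; '' if unspecified (assumed value)
t.cpu_reqs['process_type'] = ''          # e.g. 'MPI'; '' for a plain process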
Example 1: get_pipeline
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import cpu_reqs['threads_per_process'] [as alias]
from radical.entk import Pipeline, Stage, Task

def get_pipeline(shared_fs=False, size=1):

    p = Pipeline()
    p.name = 'p'

    n = 4

    s1 = Stage()
    s1.name = 's1'

    for x in range(n):
        t = Task()
        t.name = 't%s' % x

        # dd if=/dev/urandom bs=<byte size of a chunk> count=<number of chunks> of=<output file name>
        t.executable = 'dd'

        if not shared_fs:
            t.arguments = ['if=/dev/urandom', 'bs=%sM' % size, 'count=1',
                           'of=$NODE_LFS_PATH/s1_t%s.txt' % x]
        else:
            t.arguments = ['if=/dev/urandom', 'bs=%sM' % size, 'count=1',
                           'of=/home/vivek91/s1_t%s.txt' % x]

        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 24
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.lfs_per_process = 1024

        s1.add_tasks(t)

    p.add_stages(s1)

    s2 = Stage()
    s2.name = 's2'

    for x in range(n):
        t = Task()
        t.executable = ['dd']

        if not shared_fs:
            t.arguments = ['if=$NODE_LFS_PATH/s1_t%s.txt' % x, 'bs=%sM' % size,
                           'count=1', 'of=$NODE_LFS_PATH/s2_t%s.txt' % x]
        else:
            t.arguments = ['if=/home/vivek91/s1_t%s.txt' % x, 'bs=%sM' % size,
                           'count=1', 'of=/home/vivek91/s2_t%s.txt' % x]

        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 24
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        # tag each s2 task so it is scheduled on the same node as the matching
        # s1 task and can read that task's node-local output
        t.tag = 't%s' % x

        s2.add_tasks(t)

    p.add_stages(s2)

    return p
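For context, here is a hedged sketch of how such a pipeline is typically submitted for execution. The resource label, walltime, CPU count, and RabbitMQ endpoint below are illustrative placeholders, not values from the original test:

from radical.entk import AppManager

amgr = AppManager(hostname='localhost', port=5672)   # RabbitMQ endpoint (assumed)
amgr.resource_desc = {
    'resource': 'local.localhost',                   # placeholder resource label
    'walltime': 10,                                  # minutes
    'cpus'    : 96,                                  # 4 tasks x 24 threads per stage
}
amgr.workflow = [get_pipeline(shared_fs=True, size=1)]
amgr.run()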
Example 2: test_task_to_dict
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import cpu_reqs['threads_per_process'] [as alias]
from radical.entk import Task, states

def test_task_to_dict():
    """
    **Purpose**: Test whether the to_dict() method of the Task class converts
    all expected attributes of a Task into a dictionary
    """

    t = Task()
    d = t.to_dict()

    assert d == {'uid': None,
                 'name': None,
                 'state': states.INITIAL,
                 'state_history': [states.INITIAL],
                 'pre_exec': [],
                 'executable': str(),
                 'arguments': [],
                 'post_exec': [],
                 'cpu_reqs': {'processes': 1,
                              'process_type': None,
                              'threads_per_process': 1,
                              'thread_type': None
                              },
                 'gpu_reqs': {'processes': 0,
                              'process_type': None,
                              'threads_per_process': 0,
                              'thread_type': None
                              },
                 'lfs_per_process': 0,
                 'upload_input_data': [],
                 'copy_input_data': [],
                 'link_input_data': [],
                 'move_input_data': [],
                 'copy_output_data': [],
                 'move_output_data': [],
                 'download_output_data': [],
                 'stdout': None,
                 'stderr': None,
                 'exit_code': None,
                 'path': None,
                 'tag': None,
                 'parent_stage': {'uid': None, 'name': None},
                 'parent_pipeline': {'uid': None, 'name': None}}

    t = Task()
    t.uid = 'test.0000'
    t.name = 'new'
    t.pre_exec = ['module load abc']
    t.executable = ['sleep']   # the setter normalizes this to the string 'sleep'
    t.arguments = ['10']
    t.cpu_reqs['processes'] = 10
    t.cpu_reqs['threads_per_process'] = 2
    t.gpu_reqs['processes'] = 5
    t.gpu_reqs['threads_per_process'] = 3
    t.lfs_per_process = 1024
    t.upload_input_data = ['test1']
    t.copy_input_data = ['test2']
    t.link_input_data = ['test3']
    t.move_input_data = ['test4']
    t.copy_output_data = ['test5']
    t.move_output_data = ['test6']
    t.download_output_data = ['test7']
    t.stdout = 'out'
    t.stderr = 'err'
    t.exit_code = 1
    t.path = 'a/b/c'
    t.tag = 'task.0010'
    t.parent_stage = {'uid': 's1', 'name': 'stage1'}
    t.parent_pipeline = {'uid': 'p1', 'name': 'pipeline1'}

    d = t.to_dict()

    assert d == {'uid': 'test.0000',
                 'name': 'new',
                 'state': states.INITIAL,
                 'state_history': [states.INITIAL],
                 'pre_exec': ['module load abc'],
                 'executable': 'sleep',
                 'arguments': ['10'],
                 'post_exec': [],
                 'cpu_reqs': {'processes': 10,
                              'process_type': None,
                              'threads_per_process': 2,
                              'thread_type': None
                              },
                 'gpu_reqs': {'processes': 5,
                              'process_type': None,
                              'threads_per_process': 3,
                              'thread_type': None
                              },
                 'lfs_per_process': 1024,
                 'upload_input_data': ['test1'],
                 'copy_input_data': ['test2'],
                 'link_input_data': ['test3'],
                 'move_input_data': ['test4'],
                 'copy_output_data': ['test5'],
                 'move_output_data': ['test6'],
                 'download_output_data': ['test7'],
                 'stdout': 'out',
# ......... the remainder of this example is omitted here .........
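As a follow-up, a hedged sketch of the reverse direction. EnTK Tasks of this vintage also expose a from_dict() instance method; assuming it restores the attributes written by to_dict(), a round trip looks like this:

from radical.entk import Task

t1 = Task()
t1.name = 'demo'
t1.cpu_reqs['threads_per_process'] = 8

d = t1.to_dict()

t2 = Task()
t2.from_dict(d)   # assumed to repopulate the Task from the dictionary
assert t2.name == 'demo'
assert t2.cpu_reqs['threads_per_process'] == 8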
Example 3: test_rp_da_scheduler_bw
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import cpu_reqs['threads_per_process'] [as alias]
import os

from glob import glob
from radical.entk import AppManager, Pipeline, Stage, Task

# hostname, port (the RabbitMQ endpoint) and MLAB (a MongoDB URL) are
# module-level globals in the original test file and are assumed here.

def test_rp_da_scheduler_bw():
    """
    **Purpose**: Run an EnTK application on NCSA Blue Waters (aprun) and verify
    that each tagged task in stage s2 lands on the same node as its
    counterpart in stage s1
    """

    p1 = Pipeline()
    p1.name = 'p1'

    n = 10

    s1 = Stage()
    s1.name = 's1'

    for x in range(n):
        t = Task()
        t.name = 't%s' % x
        t.executable = ['/bin/hostname']
        t.arguments = ['>', 'hostname.txt']
        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 16
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.lfs_per_process = 10
        t.download_output_data = ['hostname.txt > s1_t%s_hostname.txt' % x]

        s1.add_tasks(t)

    p1.add_stages(s1)

    s2 = Stage()
    s2.name = 's2'

    for x in range(n):
        t = Task()
        t.executable = ['/bin/hostname']
        t.arguments = ['>', 'hostname.txt']
        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 16
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.download_output_data = ['hostname.txt > s2_t%s_hostname.txt' % x]
        # reuse the s1 task's tag to request co-location on the same node
        t.tag = 't%s' % x

        s2.add_tasks(t)

    p1.add_stages(s2)

    res_dict = {
        'resource': 'ncsa.bw_aprun',
        'walltime': 10,
        'cpus'    : 128,
        'project' : 'gk4',
        'queue'   : 'high'
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    appman = AppManager(hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [p1]
    appman.run()

    # each pair of files should report the same node name
    for i in range(n):
        assert (open('s1_t%s_hostname.txt' % i, 'r').readline().strip() ==
                open('s2_t%s_hostname.txt' % i, 'r').readline().strip())

    # clean up the downloaded files
    txts = glob('%s/*.txt' % os.getcwd())
    for f in txts:
        os.remove(f)
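Taken together, these examples exercise EnTK's data-aware scheduling: lfs_per_process reserves space on a node-local filesystem (exposed to tasks as $NODE_LFS_PATH), and matching tag values let a later stage's tasks run on the same nodes where an earlier stage left its data.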