本文整理汇总了Python中radical.entk.Task.cpu_reqs['process_type']方法的典型用法代码示例。如果您正苦于以下问题:Python Task.cpu_reqs['process_type']方法的具体用法?Python Task.cpu_reqs['process_type']怎么用?Python Task.cpu_reqs['process_type']使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 radical.entk.Task 的用法示例。
在下文中一共展示了Task.cpu_reqs['process_type']方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_pipeline
# 需要导入模块: from radical.entk import Task [as 别名]
# 或者: from radical.entk.Task import cpu_reqs['process_type'] [as 别名]
def get_pipeline(shared_fs=False, size=1):
    """Build a two-stage EnTK pipeline exercising node-local storage (LFS).

    Stage ``s1``: each task runs ``dd`` to write ``size`` MB of random data,
    either to node-local storage (``$NODE_LFS_PATH``) or, when *shared_fs*
    is True, to a shared-filesystem path.
    Stage ``s2``: each task copies its stage-1 counterpart's file; the task
    ``tag`` pins it to the same node so the node-local file is reachable.

    :param shared_fs: write to the shared filesystem instead of node-local
        storage (default: False)
    :param size: chunk size in MB passed to ``dd`` via ``bs=`` (default: 1)
    :return: the assembled :class:`Pipeline`
    """
    p = Pipeline()
    p.name = 'p'

    n = 4  # number of tasks per stage

    s1 = Stage()
    s1.name = 's1'
    for x in range(n):
        t = Task()
        t.name = 't%s' % x
        # dd if=/dev/random bs=<byte size of a chunk> count=<number of chunks> of=<output file name>
        t.executable = 'dd'
        if not shared_fs:
            t.arguments = ['if=/dev/urandom', 'bs=%sM' % size, 'count=1',
                           'of=$NODE_LFS_PATH/s1_t%s.txt' % x]
        else:
            t.arguments = ['if=/dev/urandom', 'bs=%sM' % size, 'count=1',
                           'of=/home/vivek91/s1_t%s.txt' % x]
        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 24
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.lfs_per_process = 1024  # MB of node-local space requested per process
        s1.add_tasks(t)
    p.add_stages(s1)

    s2 = Stage()
    s2.name = 's2'
    for x in range(n):
        t = Task()
        # FIX: was the one-element list ['dd'] while stage 1 used the plain
        # string 'dd'; use the string form consistently across both stages.
        t.executable = 'dd'
        if not shared_fs:
            t.arguments = ['if=$NODE_LFS_PATH/s1_t%s.txt' % x, 'bs=%sM' % size,
                           'count=1', 'of=$NODE_LFS_PATH/s2_t%s.txt' % x]
        else:
            t.arguments = ['if=/home/vivek91/s1_t%s.txt' % x, 'bs=%sM' % size,
                           'count=1', 'of=/home/vivek91/s2_t%s.txt' % x]
        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 24
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        # Pin this task to the node where the matching s1 task ran, so the
        # node-local input file exists there.
        t.tag = 't%s' % x
        s2.add_tasks(t)
    p.add_stages(s2)

    return p
示例2: test_rp_da_scheduler_bw
# 需要导入模块: from radical.entk import Task [as 别名]
# 或者: from radical.entk.Task import cpu_reqs['process_type'] [as 别名]
def test_rp_da_scheduler_bw():
    """
    **Purpose**: Run an EnTK application on Blue Waters (ncsa.bw_aprun) and
    verify that the data-aware scheduler places each stage-2 task on the
    same node as its tagged stage-1 counterpart.
    """
    p1 = Pipeline()
    p1.name = 'p1'

    n = 10  # number of tasks per stage

    s1 = Stage()
    s1.name = 's1'
    for x in range(n):
        t = Task()
        t.name = 't%s' % x
        t.executable = ['/bin/hostname']
        t.arguments = ['>', 'hostname.txt']
        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 16
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.lfs_per_process = 10
        t.download_output_data = ['hostname.txt > s1_t%s_hostname.txt' % (x)]
        s1.add_tasks(t)
    p1.add_stages(s1)

    s2 = Stage()
    s2.name = 's2'
    for x in range(n):
        t = Task()
        t.executable = ['/bin/hostname']
        t.arguments = ['>', 'hostname.txt']
        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 16
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.download_output_data = ['hostname.txt > s2_t%s_hostname.txt' % (x)]
        # Tag ties this task to the node used by the s1 task of the same name.
        t.tag = 't%s' % x
        s2.add_tasks(t)
    p1.add_stages(s2)

    res_dict = {
        'resource': 'ncsa.bw_aprun',
        'walltime': 10,
        'cpus': 128,
        'project': 'gk4',
        'queue': 'high',
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    appman = AppManager(hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [p1]
    appman.run()

    # Each tagged pair must have landed on the same node, i.e. both tasks
    # reported the same hostname.
    # FIX: use context managers — the original leaked two file handles per
    # iteration via open(...).readline() without ever closing them.
    for i in range(n):
        with open('s1_t%s_hostname.txt' % i, 'r') as f1, \
             open('s2_t%s_hostname.txt' % i, 'r') as f2:
            assert f1.readline().strip() == f2.readline().strip()

    # Clean up the downloaded per-task output files.
    txts = glob('%s/*.txt' % os.getcwd())
    for f in txts:
        os.remove(f)