This page collects typical usage examples of the Python method radical.entk.Task.name. If you are unsure what Task.name does or how to use it, the curated examples below should help. You can also read more about the containing class, radical.entk.Task.
The following shows 15 code examples of Task.name, sorted by popularity by default.
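Before the examples, a minimal sketch of how Task.name is typically assigned and read (assuming radical.entk is installed; AppManager setup and execution are omitted, and the names used here are placeholders):

from radical.entk import Pipeline, Stage, Task

# Create a task and give it a human-readable name.
# Avoid ',' in names: EnTK encodes task metadata as a
# comma-separated string (see Example 5 below).
t = Task()
t.name = 'my-task'
t.executable = '/bin/echo'
t.arguments = ['hello']

# Attach the task to a stage, and the stage to a pipeline.
s = Stage()
s.name = 's0'
s.add_tasks(t)

p = Pipeline()
p.name = 'p0'
p.add_stages(s)

print(t.name)   # -> 'my-task'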
Example 1: generate_pipeline
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import name [as alias]
def generate_pipeline(name, stages):

    # Create a Pipeline object
    p = Pipeline()
    p.name = name

    for s_cnt in range(stages):

        # Create a Stage object
        s = Stage()
        s.name = 'Stage %s' % s_cnt

        for t_cnt in range(5):

            # Create a Task object
            t = Task()
            t.name = 'my-task'           # Assign a name to the task (optional)
            t.executable = '/bin/echo'   # Assign executable to the task
            # Assign arguments for the task executable
            t.arguments = ['I am task %s in %s in %s' % (t_cnt, s_cnt, name)]

            # Add the Task to the Stage
            s.add_tasks(t)

        # Add Stage to the Pipeline
        p.add_stages(s)

    return p
Example 2: create_single_task
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import name [as alias]
def create_single_task():

    t1 = Task()
    t1.name = 'simulation'
    t1.executable = ['/bin/date']
    t1.copy_input_data = []
    t1.copy_output_data = []

    return t1
Example 3: create_single_task
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import name [as alias]
def create_single_task():

    t1 = Task()
    t1.name = 'simulation'
    t1.executable = ['/bin/echo']
    t1.arguments = ['hello']
    t1.copy_input_data = []
    t1.copy_output_data = []

    return t1
Example 4: get_pipeline
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import name [as alias]
def get_pipeline(shared_fs=False, size=1):

    p = Pipeline()
    p.name = 'p'

    n = 4

    s1 = Stage()
    s1.name = 's1'
    for x in range(n):
        t = Task()
        t.name = 't%s' % x

        # dd if=/dev/urandom bs=<byte size of a chunk> count=<number of chunks> of=<output file name>
        t.executable = 'dd'
        if not shared_fs:
            t.arguments = ['if=/dev/urandom', 'bs=%sM' % size, 'count=1', 'of=$NODE_LFS_PATH/s1_t%s.txt' % x]
        else:
            t.arguments = ['if=/dev/urandom', 'bs=%sM' % size, 'count=1', 'of=/home/vivek91/s1_t%s.txt' % x]
        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 24
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.lfs_per_process = 1024

        s1.add_tasks(t)

    p.add_stages(s1)

    s2 = Stage()
    s2.name = 's2'
    for x in range(n):
        t = Task()
        t.executable = ['dd']
        if not shared_fs:
            t.arguments = ['if=$NODE_LFS_PATH/s1_t%s.txt' % x, 'bs=%sM' % size, 'count=1', 'of=$NODE_LFS_PATH/s2_t%s.txt' % x]
        else:
            t.arguments = ['if=/home/vivek91/s1_t%s.txt' % x, 'bs=%sM' % size, 'count=1', 'of=/home/vivek91/s2_t%s.txt' % x]
        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 24
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.tag = 't%s' % x

        s2.add_tasks(t)

    p.add_stages(s2)

    return p
Example 5: create_task_from_cu
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import name [as alias]
def create_task_from_cu(cu, prof=None):
    """
    Purpose: Create a Task based on the Compute Unit.

    Details: Currently, only the uid, parent_stage and parent_pipeline are
    retrieved. The exact initial Task (that was converted to a CUD) cannot be
    recovered as the RP API does not provide the same attributes for a CU as
    for a CUD. Also, this is not required for the most part.

    TODO: Add exit code, stdout, stderr and path attributes to a Task.
    These can be extracted from a CU.

    :arguments:
        :cu: RP Compute Unit

    :return: Task
    """

    try:

        logger.debug('Create Task from CU %s' % cu.name)

        if prof:
            prof.prof('task from cu - create',
                      uid=cu.name.split(',')[0].strip())

        task = Task()

        task.uid                     = cu.name.split(',')[0].strip()
        task.name                    = cu.name.split(',')[1].strip()
        task.parent_stage['uid']     = cu.name.split(',')[2].strip()
        task.parent_stage['name']    = cu.name.split(',')[3].strip()
        task.parent_pipeline['uid']  = cu.name.split(',')[4].strip()
        task.parent_pipeline['name'] = cu.name.split(',')[5].strip()
        task.rts_uid                 = cu.uid

        if cu.state == rp.DONE:
            task.exit_code = 0
        else:
            task.exit_code = 1

        task.path = ru.Url(cu.sandbox).path

        if prof:
            prof.prof('task from cu - done',
                      uid=cu.name.split(',')[0].strip())

        logger.debug('Task %s created from CU %s' % (task.uid, cu.name))

        return task

    except Exception as ex:
        logger.exception('Task creation from CU failed, error: %s' % ex)
        raise
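For context on the name parsing above: EnTK encodes task provenance in the CU name as a single comma-separated string. A minimal, self-contained sketch of that encoding and the parsing performed by create_task_from_cu (the concrete uid/name values below are made up for illustration; the field order matches the split indices used above):

# Hypothetical CU name, encoded as:
# '<task uid>, <task name>, <stage uid>, <stage name>, <pipeline uid>, <pipeline name>'
cu_name = 'task.0000, my-task, stage.0000, s1, pipe.0000, p1'

fields = [f.strip() for f in cu_name.split(',')]
task_uid, task_name         = fields[0], fields[1]
stage_uid, stage_name       = fields[2], fields[3]
pipeline_uid, pipeline_name = fields[4], fields[5]

print(task_uid, task_name)   # -> task.0000 my-task

This is also why the example on this page warns against using ',' in task names: a comma in the name would break this field-based parsing.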
Example 6: create_pipeline
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import name [as alias]
def create_pipeline():

    p = Pipeline()

    s = Stage()

    t1 = Task()
    t1.name = 'simulation'
    t1.executable = ['sleep']
    t1.arguments = ['10']

    s.add_tasks(t1)
    p.add_stages(s)

    return p
Example 7: create_pipeline
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import name [as alias]
def create_pipeline():

    p = Pipeline()

    s = Stage()

    t1 = Task()
    t1.name = 'simulation'
    t1.executable = ['/bin/echo']
    t1.arguments = ['hello']
    t1.copy_input_data = []
    t1.copy_output_data = []

    s.add_tasks(t1)
    p.add_stages(s)

    return p
Example 8: generate_pipeline
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import name [as alias]
def generate_pipeline():

    # Create a Pipeline object
    p = Pipeline()
    p.name = 'p1'

    # Create a Stage object
    s1 = Stage()
    s1.name = 's1'

    # Create a Task object that deliberately fails: /bin/false exits with a non-zero code
    t1 = Task()
    t1.name = 't1'
    t1.executable = ['/bin/false']
    # t1.arguments = ['"Hello World"', '>>', 'temp.txt']

    # Add the Task to the Stage
    s1.add_tasks(t1)

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
Example 9: test_amgr_run_mock
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import name [as alias]
def test_amgr_run_mock():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.name = 'simulation'
    t.executable = ['/bin/date']
    s.tasks = t
    p.add_stages(s)

    res_dict = {
        'resource': 'local.localhost',
        'walltime': 5,
        'cpus': 1,
        'project': ''
    }

    appman = Amgr(hostname=hostname, port=port, rts="mock")
    appman.resource_desc = res_dict
    appman.workflow = [p]
    appman.run()
Example 10: generate_pipeline
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import name [as alias]
def generate_pipeline():

    # Create a Pipeline object
    p = Pipeline()
    p.name = 'p1'

    # Create a Stage object
    s1 = Stage()
    s1.name = 's1'

    # Create a Task object which creates a file named 'output.txt' of size 1 MB
    t1 = Task()
    t1.name = 't1'
    t1.executable = '/bin/bash'
    t1.arguments = ['-l', '-c', 'base64 /dev/urandom | head -c 1000000 > output.txt']

    # Add the Task to the Stage
    s1.add_tasks(t1)

    # Add Stage to the Pipeline
    p.add_stages(s1)

    # Create another Stage object to hold character count tasks
    s2 = Stage()
    s2.name = 's2'
    s2_task_uids = []

    for cnt in range(30):

        # Create a Task object
        t2 = Task()
        t2.name = 't%s' % (cnt + 1)
        t2.executable = '/bin/bash'
        t2.arguments = ['-l', '-c', 'grep -o . output.txt | sort | uniq -c > ccount.txt']

        # Copy data from the task in the first stage to the current task's location
        t2.copy_input_data = ['$Pipeline_%s_Stage_%s_Task_%s/output.txt' % (p.name, s1.name, t1.name)]

        # Add the Task to the Stage
        s2.add_tasks(t2)
        s2_task_uids.append(t2.name)

    # Add Stage to the Pipeline
    p.add_stages(s2)

    # Create another Stage object to hold checksum tasks
    s3 = Stage()
    s3.name = 's3'

    for cnt in range(30):

        # Create a Task object
        t3 = Task()
        t3.name = 't%s' % (cnt + 1)
        t3.executable = '/bin/bash'
        t3.arguments = ['-l', '-c', 'sha1sum ccount.txt > chksum.txt']

        # Copy data from the corresponding task in the second stage to the current task's location
        t3.copy_input_data = ['$Pipeline_%s_Stage_%s_Task_%s/ccount.txt' % (p.name, s2.name, s2_task_uids[cnt])]

        # Download the output of the current task to the current location
        t3.download_output_data = ['chksum.txt > chksum_%s.txt' % cnt]

        # Add the Task to the Stage
        s3.add_tasks(t3)

    # Add Stage to the Pipeline
    p.add_stages(s3)

    return p
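A note on the $Pipeline_..._Stage_..._Task_... strings used in copy_input_data above: EnTK resolves these placeholders to the sandbox directory of the referenced task at runtime. A minimal illustration of how the reference string is built, using the names from this example:

# With p.name = 'p1', s1.name = 's1', t1.name = 't1' as in Example 10,
# the data reference expands to the string below; EnTK substitutes it
# with the sandbox path of task 't1' when the workflow runs.
ref = '$Pipeline_%s_Stage_%s_Task_%s/output.txt' % ('p1', 's1', 't1')
print(ref)   # -> $Pipeline_p1_Stage_s1_Task_t1/output.txt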
示例11: Pipeline
# 需要导入模块: from radical.entk import Task [as 别名]
# 或者: from radical.entk.Task import name [as 别名]
# VM, set "RMQ_HOSTNAME" and "RMQ_PORT" in the session where you are running
# this script.
hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')
port = os.environ.get('RMQ_PORT', 5672)

if __name__ == '__main__':

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s = Stage()

    # Create a Task object
    t = Task()
    t.name = 'my-first-task'        # Assign a name to the task (optional, do not use ',' or '_')
    t.executable = '/bin/echo'      # Assign executable to the task
    t.arguments = ['Hello World']   # Assign arguments for the task executable

    # Add Task to the Stage
    s.add_tasks(t)

    # Add Stage to the Pipeline
    p.add_stages(s)

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Create a dictionary to describe the mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
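    # --- The original snippet is truncated at this point. As a hedged
    # --- illustration only (modeled on the complete resource dictionaries
    # --- in Examples 9 and 12; these values are placeholders, not recovered
    # --- from the original), the missing part might look like:
    res_dict = {
        'resource': 'local.localhost',   # execute locally
        'walltime': 10,                  # minutes
        'cpus': 1
    }

    appman.resource_desc = res_dict
    appman.workflow = [p]
    appman.run()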
Example 12: test_rp_da_scheduler_bw
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import name [as alias]
def test_rp_da_scheduler_bw():
    """
    **Purpose**: Run an EnTK application on NCSA Blue Waters (ncsa.bw_aprun)
    """

    p1 = Pipeline()
    p1.name = 'p1'

    n = 10

    s1 = Stage()
    s1.name = 's1'
    for x in range(n):
        t = Task()
        t.name = 't%s' % x
        t.executable = ['/bin/hostname']
        t.arguments = ['>', 'hostname.txt']
        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 16
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.lfs_per_process = 10
        t.download_output_data = ['hostname.txt > s1_t%s_hostname.txt' % x]

        s1.add_tasks(t)

    p1.add_stages(s1)

    s2 = Stage()
    s2.name = 's2'
    for x in range(n):
        t = Task()
        t.executable = ['/bin/hostname']
        t.arguments = ['>', 'hostname.txt']
        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 16
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.download_output_data = ['hostname.txt > s2_t%s_hostname.txt' % x]
        t.tag = 't%s' % x

        s2.add_tasks(t)

    p1.add_stages(s2)

    res_dict = {
        'resource': 'ncsa.bw_aprun',
        'walltime': 10,
        'cpus': 128,
        'project': 'gk4',
        'queue': 'high'
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    appman = AppManager(hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [p1]
    appman.run()

    # Tasks tagged 't%s' in s2 must land on the same node as the matching
    # s1 task, so both stages should report the same hostname.
    for i in range(n):
        assert open('s1_t%s_hostname.txt' % i, 'r').readline().strip() == \
               open('s2_t%s_hostname.txt' % i, 'r').readline().strip()

    txts = glob('%s/*.txt' % os.getcwd())
    for f in txts:
        os.remove(f)
Example 13: init_cycle
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import name [as alias]
def init_cycle(self, replicas, replica_cores, python_path, md_executable,
               exchange_method, min_temp, max_temp, timesteps, basename,
               pre_exec):
    # "cycle" = 1 MD stage plus the subsequent exchange computation
    """
    Initial cycle consists of:
    1) Create tarball of MD input data
    2) Transfer the tarball to pilot sandbox
    3) Untar the tarball
    4) Run first cycle
    """

    # Initialize Pipeline
    self._prof.prof('InitTar', uid=self._uid)
    p = Pipeline()
    p.name = 'initpipeline'

    md_dict = dict()    # bookkeeping
    tar_dict = dict()   # bookkeeping

    # Write the input files
    self._prof.prof('InitWriteInputs', uid=self._uid)
    writeInputs.writeInputs(
        max_temp=max_temp,
        min_temp=min_temp,
        replicas=replicas,
        timesteps=timesteps,
        basename=basename)
    self._prof.prof('EndWriteInputs', uid=self._uid)

    self._prof.prof('InitTar', uid=self._uid)
    # Create tarball of input data
    tar = tarfile.open("input_files.tar", "w")
    for name in [
            basename + ".prmtop", basename + ".inpcrd", basename + ".mdin"
    ]:
        tar.add(name)
    for r in range(replicas):
        tar.add('mdin_{0}'.format(r))
    tar.close()

    # Delete all input files outside the tarball
    for r in range(replicas):
        os.remove('mdin_{0}'.format(r))
    self._prof.prof('EndTar', uid=self._uid)

    # Create untar stage
    repo = git.Repo('.', search_parent_directories=True)
    aux_function_path = repo.working_tree_dir

    untar_stg = Stage()
    untar_stg.name = 'untarStg'

    # Untar task
    untar_tsk = Task()
    untar_tsk.name = 'untartsk'
    untar_tsk.executable = ['python']
    untar_tsk.upload_input_data = [
        str(aux_function_path) + '/repex/untar_input_files.py',
        'input_files.tar'
    ]
    untar_tsk.arguments = ['untar_input_files.py', 'input_files.tar']
    untar_tsk.cpu_reqs = 1
    # untar_tsk.post_exec = ['']
    untar_stg.add_tasks(untar_tsk)
    p.add_stages(untar_stg)

    tar_dict[0] = '$Pipeline_%s_Stage_%s_Task_%s' % (
        p.name, untar_stg.name, untar_tsk.name)

    # First MD stage: needs to be defined separately since the workflow is not
    # built from a predetermined order; also, equilibration needs to happen first.
    md_stg = Stage()
    md_stg.name = 'mdstg0'
    self._prof.prof('InitMD_0', uid=self._uid)

    # MD tasks
    for r in range(replicas):
        md_tsk = AMBERTask(cores=replica_cores, md_executable=md_executable,
                           pre_exec=pre_exec)
        md_tsk.name = 'mdtsk-{replica}-{cycle}'.format(replica=r, cycle=0)
        md_tsk.link_input_data += [
            '%s/inpcrd' % tar_dict[0],
            '%s/prmtop' % tar_dict[0],
            '%s/mdin_{0}'.format(r) % tar_dict[0]   # Use for full temperature exchange
        ]
        md_tsk.arguments = [
            '-O',
            '-p',
            'prmtop',
            '-i',
            'mdin_{0}'.format(r),
            # ... remainder of this example omitted ...
Example 14: general_cycle
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import name [as alias]
def general_cycle(self, replicas, replica_cores, cycle, python_path,
                  md_executable, exchange_method, pre_exec):
    """
    All cycles after the initial cycle.
    Pulls up the exchange pairs file and generates the new workflow.
    """

    self._prof.prof('InitcreateMDwokflow_{0}'.format(cycle), uid=self._uid)

    with open('exchangePairs_{0}.dat'.format(cycle), 'r') as f:  # Read exchangePairs.dat
        exchange_array = []
        for line in f:
            exchange_array.append(int(line.split()[1]))
            # exchange_array.append(line)
            # print exchange_array

    q = Pipeline()
    q.name = 'genpipeline{0}'.format(cycle)

    # Bookkeeping
    stage_uids = list()
    task_uids = list()  # = dict()
    md_dict = dict()

    # Create MD stage
    md_stg = Stage()
    md_stg.name = 'mdstage{0}'.format(cycle)

    self._prof.prof('InitMD_{0}'.format(cycle), uid=self._uid)

    for r in range(replicas):
        md_tsk = AMBERTask(cores=replica_cores, md_executable=md_executable,
                           pre_exec=pre_exec)
        md_tsk.name = 'mdtsk-{replica}-{cycle}'.format(
            replica=r, cycle=cycle)
        md_tsk.link_input_data = [
            '%s/restrt > inpcrd' % (self.book[cycle - 1][exchange_array[r]]),
            '%s/prmtop' % (self.book[0][r]),
            '%s/mdin_{0}'.format(r) % (self.book[0][r])
        ]

        # The following softlinking scheme is to be used ONLY if the
        # node-local file system is used; not fully supported yet:
        # md_tsk.link_input_data = ['$NODE_LFS_PATH/rstrt-{replica}-{cycle}'.format(replica=exchange_array[r], cycle=cycle-1) > '$NODE_LFS_PATH/inpcrd',
        #                           # '%s/restrt > inpcrd' % (self.book[cycle-1][exchange_array[r]]),
        #                           '%s/prmtop' % (self.book[0][r]),
        #                           '%s/mdin_{0}'.format(r) % (self.Book[0][r])]

        md_tsk.arguments = [
            '-O',
            '-i', 'mdin_{0}'.format(r),
            '-p', 'prmtop',
            '-c', 'inpcrd',
            # '-c', 'rstrt-{replica}-{cycle}'.format(replica=r, cycle=cycle-1),
            '-o', 'out-{replica}-{cycle}'.format(replica=r, cycle=cycle),
            '-r', 'restrt',
            # '-r', 'rstrt-{replica}-{cycle}'.format(replica=r, cycle=cycle),
            '-x', 'mdcrd-{replica}-{cycle}'.format(replica=r, cycle=cycle),
            '-inf', 'mdinfo_{0}'.format(r)
        ]
        # md_tsk.tag = 'mdtsk-{replica}-{cycle}'.format(replica=r, cycle=0)

        md_dict[r] = '$Pipeline_%s_Stage_%s_Task_%s' % (
            q.name, md_stg.name, md_tsk.name)
        self.md_task_list.append(md_tsk)
        md_stg.add_tasks(md_tsk)

    q.add_stages(md_stg)

    ex_stg = Stage()
    ex_stg.name = 'exstg{0}'.format(cycle + 1)

    # Create exchange task
    ex_tsk = Task()
    ex_tsk.name = 'extsk{0}'.format(cycle + 1)
    ex_tsk.executable = [python_path]  # ['/usr/bin/python'] or ['/opt/python/bin/python']
    ex_tsk.upload_input_data = [exchange_method]
    for r in range(replicas):
        ex_tsk.link_input_data += ['%s/mdinfo_%s' % (md_dict[r], r)]
    ex_tsk.pre_exec = ['mv *.py exchange_method.py']
    ex_tsk.arguments = [
        'exchange_method.py', '{0}'.format(replicas), '{0}'.format(cycle + 1)
    ]
    ex_tsk.cores = 1
    ex_tsk.mpi = False

    # Finds exchange partners; also generates the exchange history trace
    ex_tsk.download_output_data = [
        'exchangePairs_{0}.dat'.format(cycle + 1)
    ]

    ex_stg.add_tasks(ex_tsk)
    # task_uids.append(ex_tsk.uid)
    self.ex_task_list.append(ex_tsk)
    q.add_stages(ex_stg)
    # ... remainder of this example omitted ...
Example 15: InitCycle
# Required import: from radical.entk import Task [as alias]
# Or: from radical.entk.Task import name [as alias]
def InitCycle(self, Replicas, Replica_Cores, md_executable, ExchangeMethod,
              timesteps):
    # "Cycle" = 1 MD stage plus the subsequent exchange computation
    """
    Initial cycle consists of:
    1) Create tarball of MD input data
    2) Transfer the tarball to pilot sandbox
    3) Untar the tarball
    4) Run first Cycle
    """

    # Initialize Pipeline
    # self._prof.prof('InitTar', uid=self._uid)
    p = Pipeline()
    p.name = 'initpipeline'

    md_dict = dict()    # Bookkeeping
    tar_dict = dict()   # Bookkeeping

    # Write the input files
    self._prof.prof('InitWriteInputs', uid=self._uid)
    writeInputs.writeInputs(max_temp=350, min_temp=250, replicas=Replicas,
                            timesteps=timesteps)
    self._prof.prof('EndWriteInputs', uid=self._uid)

    self._prof.prof('InitTar', uid=self._uid)
    # Create tarball of input data
    tar = tarfile.open("Input_Files.tar", "w")
    for name in ["prmtop", "inpcrd", "mdin"]:
        tar.add(name)
    for r in range(Replicas):
        tar.add('mdin_{0}'.format(r))
    tar.close()

    # Delete all input files outside the tarball
    for r in range(Replicas):
        os.remove('mdin_{0}'.format(r))
    self._prof.prof('EndTar', uid=self._uid)

    # Create untar stage
    untar_stg = Stage()
    untar_stg.name = 'untarStg'

    # Untar task
    untar_tsk = Task()
    untar_tsk.name = 'untartsk'
    untar_tsk.executable = ['python']
    untar_tsk.upload_input_data = ['untar_input_files.py', 'Input_Files.tar']
    untar_tsk.arguments = ['untar_input_files.py', 'Input_Files.tar']
    untar_tsk.cores = 1

    untar_stg.add_tasks(untar_tsk)
    p.add_stages(untar_stg)

    tar_dict[0] = '$Pipeline_%s_Stage_%s_Task_%s' % (p.name,
                                                     untar_stg.name,
                                                     untar_tsk.name)

    # First MD stage: needs to be defined separately since the workflow is not
    # built from a predetermined order
    md_stg = Stage()
    md_stg.name = 'mdstg0'
    self._prof.prof('InitMD_0', uid=self._uid)

    # MD tasks
    for r in range(Replicas):
        md_tsk = AMBERTask(cores=Replica_Cores, MD_Executable=md_executable)
        md_tsk.name = 'mdtsk-{replica}-{cycle}'.format(replica=r, cycle=0)
        md_tsk.link_input_data += [
            '%s/inpcrd' % tar_dict[0],
            '%s/prmtop' % tar_dict[0],
            '%s/mdin_{0}'.format(r) % tar_dict[0]   # Use for full temperature exchange
            # '%s/mdin' % tar_dict[0]               # Testing only
        ]
        md_tsk.arguments = ['-O', '-p', 'prmtop',
                            '-i', 'mdin_{0}'.format(r),   # Use this for full temperature exchange
                            '-c', 'inpcrd', '-o', 'out_{0}'.format(r),
                            '-inf', 'mdinfo_{0}'.format(r)]
        md_dict[r] = '$Pipeline_%s_Stage_%s_Task_%s' % (p.name, md_stg.name,
                                                        md_tsk.name)
        md_stg.add_tasks(md_tsk)
        self.md_task_list.append(md_tsk)
        # print md_tsk.uid

    p.add_stages(md_stg)
    # ... remainder of this example omitted ...