This article collects typical usage examples of the Python method radical.entk.Stage.add_tasks. If you are wondering what Stage.add_tasks does, how to call it, or want to see it used in context, the curated code examples below may help. You can also browse further usage examples of the containing class, radical.entk.Stage.
The following presents 15 code examples of the Stage.add_tasks method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
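Before the individual examples, here is a minimal sketch of the pattern they all share: create a Stage, hand it one Task or a collection of Tasks via add_tasks, and attach the Stage to a Pipeline. The executables and arguments below are placeholders, not taken from any particular example.

from radical.entk import Pipeline, Stage, Task

p = Pipeline()
s = Stage()

# add_tasks accepts a single Task ...
t = Task()
t.executable = '/bin/echo'    # placeholder executable
t.arguments  = ['hello']      # placeholder arguments
s.add_tasks(t)

# ... or an iterable (list/set) of Tasks, as in Example 3 below
t1, t2 = Task(), Task()
t1.executable = '/bin/date'
t2.executable = '/bin/date'
s.add_tasks([t1, t2])

p.add_stages(s)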
Example 1: generate_pipeline
# Required import: from radical.entk import Stage [as alias]
# Or: from radical.entk.Stage import add_tasks [as alias]
def generate_pipeline(name, stages):

    # Create a Pipeline object
    p = Pipeline()
    p.name = name

    for s_cnt in range(stages):

        # Create a Stage object
        s = Stage()
        s.name = 'Stage %s' % s_cnt

        for t_cnt in range(5):

            # Create a Task object
            t = Task()
            t.name = 'my-task'          # Assign a name to the task (optional)
            t.executable = '/bin/echo'  # Assign executable to the task
            # Assign arguments for the task executable
            t.arguments = ['I am task %s in %s in %s' % (t_cnt, s_cnt, name)]

            # Add the Task to the Stage
            s.add_tasks(t)

        # Add Stage to the Pipeline
        p.add_stages(s)

    return p
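The function above only builds a pipeline; it does not execute it. As later examples (e.g. Example 4) show, execution goes through an AppManager with a resource description. A minimal sketch along those lines, assuming a local resource and the same hostname/port variables the tests below use for the message queue; the walltime and cpu values are placeholders:

from radical.entk import AppManager

appman = AppManager(hostname=hostname, port=port)
appman.resource_desc = {
    'resource': 'local.localhost',   # placeholder resource description
    'walltime': 10,
    'cpus': 1
}
appman.workflow = [generate_pipeline(name='demo', stages=2)]
appman.run()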
Example 2: test_wfp_check_processor
# Required import: from radical.entk import Stage [as alias]
# Or: from radical.entk.Stage import add_tasks [as alias]
def test_wfp_check_processor():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp.start_processor()
    assert wfp.check_processor()

    wfp.terminate_processor()
    assert not wfp.check_processor()
Example 3: test_stage_task_addition
# Required import: from radical.entk import Stage [as alias]
# Or: from radical.entk.Stage import add_tasks [as alias]
def test_stage_task_addition():

    s = Stage()
    t1 = Task()
    t1.executable = ['/bin/date']
    t2 = Task()
    t2.executable = ['/bin/date']
    s.add_tasks(set([t1, t2]))

    assert type(s.tasks) == set
    assert s._task_count == 2
    assert t1 in s.tasks
    assert t2 in s.tasks

    s = Stage()
    t1 = Task()
    t1.executable = ['/bin/date']
    t2 = Task()
    t2.executable = ['/bin/date']
    s.add_tasks([t1, t2])

    assert type(s.tasks) == set
    assert s._task_count == 2
    assert t1 in s.tasks
    assert t2 in s.tasks
Example 4: test_stage_post_exec
# Required import: from radical.entk import Stage [as alias]
# Or: from radical.entk.Stage import add_tasks [as alias]
def test_stage_post_exec():

    global p1

    p1.name = 'p1'

    s = Stage()
    s.name = 's1'

    for t in range(NUM_TASKS):
        s.add_tasks(create_single_task())

    s.post_exec = condition

    p1.add_stages(s)

    res_dict = {
        'resource': 'local.localhost',
        'walltime': 30,
        'cpus': 1,
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    appman = AppManager(rts='radical.pilot', hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [p1]
    appman.run()
Example 5: test_state_order
# Required import: from radical.entk import Stage [as alias]
# Or: from radical.entk.Stage import add_tasks [as alias]
def test_state_order():
    """
    **Purpose**: Test if the Pipeline, Stage and Task are assigned their states in the correct order
    """

    def create_single_task():

        t1 = Task()
        t1.name = 'simulation'
        t1.executable = ['/bin/date']
        t1.copy_input_data = []
        t1.copy_output_data = []

        return t1

    p1 = Pipeline()
    p1.name = 'p1'

    s = Stage()
    s.name = 's1'
    s.tasks = create_single_task()
    s.add_tasks(create_single_task())

    p1.add_stages(s)

    res_dict = {
        'resource': 'local.localhost',
        'walltime': 5,
        'cpus': 1,
        'project': ''
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB
    os.environ['RP_ENABLE_OLD_DEFINES'] = 'True'

    appman = Amgr(hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [p1]
    appman.run()

    p_state_hist = p1.state_history
    assert p_state_hist == ['DESCRIBED', 'SCHEDULING', 'DONE']

    s_state_hist = p1.stages[0].state_history
    assert s_state_hist == ['DESCRIBED', 'SCHEDULING', 'SCHEDULED', 'DONE']

    tasks = p1.stages[0].tasks

    for t in tasks:
        t_state_hist = t.state_history
        assert t_state_hist == ['DESCRIBED', 'SCHEDULING', 'SCHEDULED', 'SUBMITTING', 'SUBMITTED',
                                'EXECUTED', 'DEQUEUEING', 'DEQUEUED', 'DONE']
Example 6: get_pipeline
# Required import: from radical.entk import Stage [as alias]
# Or: from radical.entk.Stage import add_tasks [as alias]
def get_pipeline(shared_fs=False, size=1):

    p = Pipeline()
    p.name = 'p'

    n = 4

    s1 = Stage()
    s1.name = 's1'
    for x in range(n):
        t = Task()
        t.name = 't%s' % x

        # dd if=/dev/urandom bs=<byte size of a chunk> count=<number of chunks> of=<output file name>
        t.executable = 'dd'
        if not shared_fs:
            t.arguments = ['if=/dev/urandom', 'bs=%sM' % size, 'count=1',
                           'of=$NODE_LFS_PATH/s1_t%s.txt' % x]
        else:
            t.arguments = ['if=/dev/urandom', 'bs=%sM' % size, 'count=1',
                           'of=/home/vivek91/s1_t%s.txt' % x]
        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 24
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.lfs_per_process = 1024

        s1.add_tasks(t)

    p.add_stages(s1)

    s2 = Stage()
    s2.name = 's2'
    for x in range(n):
        t = Task()
        t.executable = ['dd']
        if not shared_fs:
            t.arguments = ['if=$NODE_LFS_PATH/s1_t%s.txt' % x, 'bs=%sM' % size, 'count=1',
                           'of=$NODE_LFS_PATH/s2_t%s.txt' % x]
        else:
            t.arguments = ['if=/home/vivek91/s1_t%s.txt' % x, 'bs=%sM' % size, 'count=1',
                           'of=/home/vivek91/s2_t%s.txt' % x]
        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 24
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.tag = 't%s' % x

        s2.add_tasks(t)

    p.add_stages(s2)

    return p
Example 7: test_wfp_workflow_incomplete
# Required import: from radical.entk import Stage [as alias]
# Or: from radical.entk.Stage import add_tasks [as alias]
def test_wfp_workflow_incomplete():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()
    assert wfp.workflow_incomplete()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')
    p.stages[0].state = states.SCHEDULING
    p.state = states.SCHEDULED
    import json
    import pika

    for t in p.stages[0].tasks:
        t.state = states.COMPLETED
        task_as_dict = json.dumps(t.to_dict())
        mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=amgr._mq_hostname,
                                                                          port=amgr._port))
        mq_channel = mq_connection.channel()
        mq_channel.basic_publish(exchange='',
                                 routing_key='%s-completedq-1' % amgr._sid,
                                 body=task_as_dict)

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_dequeue_test, name='temp-proc', args=(wfp,))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    assert not wfp.workflow_incomplete()
Example 8: test_stage_check_complete
# Required import: from radical.entk import Stage [as alias]
# Or: from radical.entk.Stage import add_tasks [as alias]
def test_stage_check_complete():

    s = Stage()
    t1 = Task()
    t1.executable = ['/bin/date']
    t2 = Task()
    t2.executable = ['/bin/date']
    s.add_tasks([t1, t2])

    assert s._check_stage_complete() == False
    s._set_tasks_state(states.DONE)
    assert s._check_stage_complete() == True
Example 9: test_amgr_synchronizer
# Required import: from radical.entk import Stage [as alias]
# Or: from radical.entk.Stage import add_tasks [as alias]
def test_amgr_synchronizer():

    logger = ru.Logger('radical.entk.temp_logger')
    profiler = ru.Profiler(name='radical.entk.temp')
    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    p = Pipeline()
    s = Stage()

    # Create and add 100 tasks to the stage
    for cnt in range(100):
        t = Task()
        t.executable = ['some-executable-%s' % cnt]
        s.add_tasks(t)

    p.add_stages(s)
    p._assign_uid(amgr._sid)
    p._validate()

    amgr.workflow = [p]

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL

    # Start the synchronizer method in a thread
    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    # Run the synchronizer test function in a separate process
    proc = Process(target=func_for_synchronizer_test, name='temp-proc',
                   args=(amgr._sid, p, logger, profiler))
    proc.start()
    proc.join()

    for t in p.stages[0].tasks:
        assert t.state == states.SCHEDULING

    assert p.stages[0].state == states.SCHEDULING
    assert p.state == states.SCHEDULING

    amgr._terminate_sync.set()
    sync_thread.join()
Example 10: generate_pipeline
# Required import: from radical.entk import Stage [as alias]
# Or: from radical.entk.Stage import add_tasks [as alias]
def generate_pipeline():

    def func_condition():

        global CUR_NEW_STAGE, MAX_NEW_STAGE

        if CUR_NEW_STAGE <= MAX_NEW_STAGE:
            return True

        return False

    def func_on_true():

        global CUR_NEW_STAGE
        CUR_NEW_STAGE += 1

        shuffle(p.stages[CUR_NEW_STAGE:])

    def func_on_false():
        print('Done')

    # Create a Pipeline object
    p = Pipeline()

    for s in range(MAX_NEW_STAGE + 1):

        # Create a Stage object
        s1 = Stage()

        for i in range(CUR_TASKS):

            t1 = Task()
            t1.executable = '/bin/sleep'
            t1.arguments = ['30']

            # Add the Task to the Stage
            s1.add_tasks(t1)

        # Add post-exec to the Stage
        s1.post_exec = {
            'condition': func_condition,
            'on_true': func_on_true,
            'on_false': func_on_false
        }

        # Add Stage to the Pipeline
        p.add_stages(s1)

    return p
Example 11: test_stage_set_tasks_state
# Required import: from radical.entk import Stage [as alias]
# Or: from radical.entk.Stage import add_tasks [as alias]
def test_stage_set_tasks_state():

    s = Stage()
    t1 = Task()
    t1.executable = ['/bin/date']
    t2 = Task()
    t2.executable = ['/bin/date']
    s.add_tasks([t1, t2])

    with pytest.raises(ValueError):
        s._set_tasks_state(2)

    s._set_tasks_state(states.DONE)
    assert t1.state == states.DONE
    assert t2.state == states.DONE
Example 12: on_true
# Required import: from radical.entk import Stage [as alias]
# Or: from radical.entk.Stage import add_tasks [as alias]
def on_true():

    global NUM_TASKS, CUR_STAGE

    NUM_TASKS *= 2

    s = Stage()
    s.name = 's%s' % CUR_STAGE

    for t in range(NUM_TASKS):
        s.add_tasks(create_single_task())

    s.post_exec = condition

    p1.add_stages(s)
Example 13: create_pipeline
# Required import: from radical.entk import Stage [as alias]
# Or: from radical.entk.Stage import add_tasks [as alias]
def create_pipeline():

    p = Pipeline()
    s = Stage()

    t1 = Task()
    t1.name = 'simulation'
    t1.executable = ['sleep']
    t1.arguments = ['10']

    s.add_tasks(t1)
    p.add_stages(s)

    return p
Example 14: test_wfp_enqueue
# Required import: from radical.entk import Stage [as alias]
# Or: from radical.entk.Stage import add_tasks [as alias]
def test_wfp_enqueue():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_enqueue_test, name='temp-proc', args=(wfp,))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    for t in p.stages[0].tasks:
        assert t.state == states.SCHEDULED

    assert p.stages[0].state == states.SCHEDULED
    assert p.state == states.SCHEDULING
Example 15: create_pipeline
# Required import: from radical.entk import Stage [as alias]
# Or: from radical.entk.Stage import add_tasks [as alias]
def create_pipeline():

    p = Pipeline()
    s = Stage()

    t1 = Task()
    t1.name = 'simulation'
    t1.executable = ['/bin/echo']
    t1.arguments = ['hello']
    t1.copy_input_data = []
    t1.copy_output_data = []

    s.add_tasks(t1)
    p.add_stages(s)

    return p