本文整理匯總了Python中radical.entk.Pipeline.add_stages方法的典型用法代碼示例。如果您正苦於以下問題:Python Pipeline.add_stages方法的具體用法?Python Pipeline.add_stages怎麽用?Python Pipeline.add_stages使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類radical.entk.Pipeline
的用法示例。
在下文中一共展示了Pipeline.add_stages方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: test_wfp_initialization
# 需要導入模塊: from radical.entk import Pipeline [as 別名]
# 或者: from radical.entk.Pipeline import add_stages [as 別名]
def test_wfp_initialization(s, i, b, l):
    """Verify that a WFprocessor stores its construction arguments.

    Builds a minimal one-pipeline workflow, constructs a WFprocessor with
    known values and asserts the private attributes reflect them.  The
    parameters *s*, *i*, *b*, *l* are arbitrary sample values (presumably
    supplied by a hypothesis/parametrize driver — TODO confirm) used for a
    second, smoke-test construction.
    """
    p = Pipeline()
    st = Stage()
    t = Task()
    t.executable = ['/bin/date']
    st.add_tasks(t)
    p.add_stages(st)

    wfp = WFprocessor(sid='rp.session.local.0000',
                      workflow=set([p]),
                      pending_queue=['pending'],
                      completed_queue=['completed'],
                      mq_hostname=hostname,
                      port=port,
                      resubmit_failed=True)

    # uid format is '<component-name>.<counter>'
    assert len(wfp._uid.split('.')) == 2
    assert 'wfprocessor' == wfp._uid.split('.')[0]
    assert wfp._pending_queue == ['pending']
    assert wfp._completed_queue == ['completed']
    assert wfp._mq_hostname == hostname
    assert wfp._port == port
    # fix: identity comparison for None (PEP 8 / E711), was `== None`
    assert wfp._wfp_process is None
    assert wfp._workflow == set([p])

    # Smoke-test construction with arbitrary non-unicode sample values.
    if not isinstance(s, unicode):
        wfp = WFprocessor(sid=s,
                          workflow=set([p]),
                          pending_queue=l,
                          completed_queue=l,
                          mq_hostname=s,
                          port=i,
                          resubmit_failed=b)
示例2: test_pipeline_decrement_stage
# 需要導入模塊: from radical.entk import Pipeline [as 別名]
# 或者: from radical.entk.Pipeline import add_stages [as 別名]
def test_pipeline_decrement_stage():
    """_decrement_stage() must step the stage cursor back one position and
    clear the pipeline's completion flag, without touching the stage count."""
    p = Pipeline()

    s1 = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s1.tasks = t

    s2 = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s2.tasks = t

    p.add_stages([s1, s2])

    # Advance past the last stage: the pipeline is considered complete.
    p._increment_stage()
    p._increment_stage()
    assert p._stage_count == 2
    assert p._cur_stage == 2
    # idiom: assert truthiness directly, was `is_set() == True`
    assert p._completed_flag.is_set()

    # Stepping back reopens the pipeline.
    p._decrement_stage()
    assert p._stage_count == 2
    assert p._cur_stage == 1
    assert not p._completed_flag.is_set()

    p._decrement_stage()
    assert p._stage_count == 2
    assert p._cur_stage == 0
    assert not p._completed_flag.is_set()
示例3: generate_pipeline
# 需要導入模塊: from radical.entk import Pipeline [as 別名]
# 或者: from radical.entk.Pipeline import add_stages [as 別名]
def generate_pipeline(name, stages):
    """Return a Pipeline called *name* holding *stages* stages,
    each with five '/bin/echo' tasks that print their own position."""
    pipeline = Pipeline()
    pipeline.name = name

    for s_cnt in range(stages):
        stage = Stage()
        stage.name = 'Stage %s' % s_cnt

        for t_cnt in range(5):
            task = Task()
            task.name = 'my-task'          # Assign a name to the task (optional)
            task.executable = '/bin/echo'  # Assign executable to the task
            # Arguments identify the task's position within the workflow
            task.arguments = ['I am task %s in %s in %s' % (t_cnt, s_cnt, name)]
            stage.add_tasks(task)

        pipeline.add_stages(stage)

    return pipeline
示例4: test_wfp_check_processor
# 需要導入模塊: from radical.entk import Pipeline [as 別名]
# 或者: from radical.entk.Pipeline import add_stages [as 別名]
def test_wfp_check_processor():
    """check_processor() must report True after start_processor() and
    False again after terminate_processor()."""
    pipeline = Pipeline()
    stage = Stage()
    task = Task()
    task.executable = ['/bin/date']
    stage.add_tasks(task)
    pipeline.add_stages(stage)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[pipeline],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp.start_processor()
    assert wfp.check_processor()

    wfp.terminate_processor()
    assert not wfp.check_processor()
示例5: test_state_order
# 需要導入模塊: from radical.entk import Pipeline [as 別名]
# 或者: from radical.entk.Pipeline import add_stages [as 別名]
def test_state_order():
    """
    **Purpose**: Test if the Pipeline, Stage and Task are assigned their states in the correct order
    """

    def make_task():
        # One '/bin/date' task with empty staging directives.
        task = Task()
        task.name = 'simulation'
        task.executable = ['/bin/date']
        task.copy_input_data = []
        task.copy_output_data = []
        return task

    pipeline = Pipeline()
    pipeline.name = 'p1'

    stage = Stage()
    stage.name = 's1'
    stage.tasks = make_task()
    stage.add_tasks(make_task())

    pipeline.add_stages(stage)

    res_dict = {'resource': 'local.localhost',
                'walltime': 5,
                'cpus': 1,
                'project': ''}

    os.environ['RADICAL_PILOT_DBURL'] = MLAB
    os.environ['RP_ENABLE_OLD_DEFINES'] = 'True'

    appman = Amgr(hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [pipeline]
    appman.run()

    # After a full run, every object's state history must follow the
    # expected lifecycle ordering.
    assert pipeline.state_history == ['DESCRIBED', 'SCHEDULING', 'DONE']
    assert pipeline.stages[0].state_history == ['DESCRIBED', 'SCHEDULING', 'SCHEDULED', 'DONE']

    for task in pipeline.stages[0].tasks:
        assert task.state_history == ['DESCRIBED', 'SCHEDULING', 'SCHEDULED', 'SUBMITTING', 'SUBMITTED',
                                      'EXECUTED', 'DEQUEUEING', 'DEQUEUED', 'DONE']
示例6: get_pipeline
# 需要導入模塊: from radical.entk import Pipeline [as 別名]
# 或者: from radical.entk.Pipeline import add_stages [as 別名]
def get_pipeline(shared_fs=False, size=1):
    """Build a two-stage 'dd' benchmark pipeline.

    Stage 1 writes a random file of *size* MB per task (to node-local
    storage, or the home directory when *shared_fs* is True); stage 2
    reads each stage-1 file back and rewrites it, tagged so each task
    lands on the same node as its stage-1 producer.

    :param shared_fs: if True use a shared-filesystem path instead of
                      $NODE_LFS_PATH (node-local storage).
    :param size:      chunk size in MB passed to dd's bs= option.
    """
    p = Pipeline()
    p.name = 'p'

    n = 4  # tasks per stage

    s1 = Stage()
    s1.name = 's1'
    for x in range(n):
        t = Task()
        t.name = 't%s' % x
        # dd if=/dev/random bs=<byte size of a chunk> count=<number of chunks> of=<output file name>
        t.executable = 'dd'
        if not shared_fs:
            t.arguments = ['if=/dev/urandom', 'bs=%sM' % size, 'count=1', 'of=$NODE_LFS_PATH/s1_t%s.txt' % x]
        else:
            t.arguments = ['if=/dev/urandom', 'bs=%sM' % size, 'count=1', 'of=/home/vivek91/s1_t%s.txt' % x]
        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 24
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.lfs_per_process = 1024
        s1.add_tasks(t)

    p.add_stages(s1)

    s2 = Stage()
    s2.name = 's2'
    for x in range(n):
        t = Task()
        # consistency: stage 1 assigns the executable as a plain string;
        # this was `['dd']` (list form) here
        t.executable = 'dd'
        if not shared_fs:
            t.arguments = ['if=$NODE_LFS_PATH/s1_t%s.txt' % x, 'bs=%sM' % size, 'count=1', 'of=$NODE_LFS_PATH/s2_t%s.txt' % x]
        else:
            t.arguments = ['if=/home/vivek91/s1_t%s.txt' % x, 'bs=%sM' % size, 'count=1', 'of=/home/vivek91/s2_t%s.txt' % x]
        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 24
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        # co-locate with the stage-1 task that produced this task's input
        t.tag = 't%s' % x
        s2.add_tasks(t)

    p.add_stages(s2)

    return p
示例7: test_wfp_workflow_incomplete
# 需要導入模塊: from radical.entk import Pipeline [as 別名]
# 或者: from radical.entk.Pipeline import add_stages [as 別名]
def test_wfp_workflow_incomplete():
    """workflow_incomplete() must be True for a fresh workflow and False
    once every task has been marked completed through the completed queue."""
    import json
    import pika

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()
    assert wfp.workflow_incomplete()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    # fix: these were `==` (no-op comparisons); the states must be assigned
    # so the completed-task messages below are valid transitions
    p.stages[0].state = states.SCHEDULING
    p.state = states.SCHEDULED

    # Publish each task as COMPLETED on the completed queue.
    for t in p.stages[0].tasks:
        t.state = states.COMPLETED
        task_as_dict = json.dumps(t.to_dict())
        mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=amgr._mq_hostname, port=amgr._port))
        mq_channel = mq_connection.channel()
        mq_channel.basic_publish(exchange='',
                                 routing_key='%s-completedq-1' % amgr._sid,
                                 body=task_as_dict)

    # Run the synchronizer in a thread and the dequeue helper in a process.
    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_dequeue_test, name='temp-proc', args=(wfp,))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    assert not wfp.workflow_incomplete()
示例8: test_amgr_synchronizer
# 需要導入模塊: from radical.entk import Pipeline [as 別名]
# 或者: from radical.entk.Pipeline import add_stages [as 別名]
def test_amgr_synchronizer():
    """Check that the Amgr synchronizer advances 100 tasks (and their stage
    and pipeline) from INITIAL to SCHEDULING via queue messages."""
    logger = ru.Logger('radical.entk.temp_logger')
    profiler = ru.Profiler(name='radical.entk.temp')
    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()
    p = Pipeline()
    s = Stage()
    # Create and add 100 tasks to the stage
    for cnt in range(100):
        t = Task()
        t.executable = ['some-executable-%s' % cnt]
        s.add_tasks(t)
    p.add_stages(s)
    p._assign_uid(amgr._sid)
    p._validate()
    amgr.workflow = [p]
    # Everything starts in the INITIAL state.
    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL
    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL
    # Start the synchronizer method in a thread
    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()
    # Run the message-producing helper in a separate process and wait for it
    proc = Process(target=func_for_synchronizer_test, name='temp-proc',
                   args=(amgr._sid, p, logger, profiler))
    proc.start()
    proc.join()
    # The synchronizer must have moved every object to SCHEDULING.
    for t in p.stages[0].tasks:
        assert t.state == states.SCHEDULING
    assert p.stages[0].state == states.SCHEDULING
    assert p.state == states.SCHEDULING
    amgr._terminate_sync.set()
    sync_thread.join()
示例9: generate_pipeline
# 需要導入模塊: from radical.entk import Pipeline [as 別名]
# 或者: from radical.entk.Pipeline import add_stages [as 別名]
def generate_pipeline():
    """Build a pipeline of MAX_NEW_STAGE+1 sleep stages whose post_exec hook
    shuffles the remaining stages after each stage completes."""

    def func_condition():
        # Keep adapting while there are stages left to shuffle.
        global CUR_NEW_STAGE, MAX_NEW_STAGE

        if CUR_NEW_STAGE <= MAX_NEW_STAGE:
            return True

        return False

    def func_on_true():
        # Advance the cursor and randomize the order of the not-yet-run stages.
        global CUR_NEW_STAGE

        CUR_NEW_STAGE += 1
        shuffle(p.stages[CUR_NEW_STAGE:])

    def func_on_false():
        print('Done')

    # Create a Pipeline object
    p = Pipeline()

    for s in range(MAX_NEW_STAGE + 1):

        # Create a Stage object
        s1 = Stage()

        for i in range(CUR_TASKS):
            t1 = Task()
            t1.executable = '/bin/sleep'
            t1.arguments = ['30']

            # Add the Task to the Stage
            s1.add_tasks(t1)

        # Add post-exec to the Stage
        # fix: the dict keys were missing their opening quotes (SyntaxError)
        s1.post_exec = {
            'condition': func_condition,
            'on_true': func_on_true,
            'on_false': func_on_false
        }

        # Add Stage to the Pipeline
        p.add_stages(s1)

    return p
示例10: test_pipeline_pass_uid
# 需要導入模塊: from radical.entk import Pipeline [as 別名]
# 或者: from radical.entk.Pipeline import add_stages [as 別名]
def test_pipeline_pass_uid():
    """_pass_uid() must propagate the pipeline's uid and name to every stage."""
    pipeline = Pipeline()
    pipeline._uid = 'test'
    pipeline.name = 'p1'

    stage_a = Stage()
    stage_b = Stage()
    pipeline.add_stages([stage_a, stage_b])

    pipeline._pass_uid()

    for stage in (stage_a, stage_b):
        assert stage.parent_pipeline['uid'] == pipeline.uid
        assert stage.parent_pipeline['name'] == pipeline.name
示例11: create_pipeline
# 需要導入模塊: from radical.entk import Pipeline [as 別名]
# 或者: from radical.entk.Pipeline import add_stages [as 別名]
def create_pipeline():
    """Return a one-stage pipeline with a single 'sleep 10' task."""
    task = Task()
    task.name = 'simulation'
    task.executable = ['sleep']
    task.arguments = ['10']

    stage = Stage()
    stage.add_tasks(task)

    pipeline = Pipeline()
    pipeline.add_stages(stage)

    return pipeline
示例12: test_wfp_enqueue
# 需要導入模塊: from radical.entk import Pipeline [as 別名]
# 或者: from radical.entk.Pipeline import add_stages [as 別名]
def test_wfp_enqueue():
    """Check that enqueueing through the WFprocessor moves tasks and stage
    to SCHEDULED and the pipeline to SCHEDULING."""
    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)
    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()
    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)
    wfp._initialize_workflow()
    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')
    # Everything starts in the INITIAL state.
    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL
    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL
    # Run the synchronizer in a thread and the enqueue helper in a process;
    # the helper must finish before states are inspected.
    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()
    proc = Process(target=func_for_enqueue_test, name='temp-proc', args=(wfp,))
    proc.start()
    proc.join()
    amgr._terminate_sync.set()
    sync_thread.join()
    # Tasks and stage end up SCHEDULED; the pipeline stays SCHEDULING
    # until its stages complete.
    for t in p.stages[0].tasks:
        assert t.state == states.SCHEDULED
    assert p.stages[0].state == states.SCHEDULED
    assert p.state == states.SCHEDULING
示例13: test_pipeline_assignment_exceptions
# 需要導入模塊: from radical.entk import Pipeline [as 別名]
# 或者: from radical.entk.Pipeline import add_stages [as 別名]
def test_pipeline_assignment_exceptions(t, l, i, b, se):
    """Non-string/invalid sample values must raise TypeError when assigned
    to a Pipeline's name or stages, or passed to add_stages()."""
    pipeline = Pipeline()

    for sample in [t, l, i, b, se]:
        if isinstance(sample, str):
            continue

        with pytest.raises(TypeError):
            pipeline.name = sample

        with pytest.raises(TypeError):
            pipeline.stages = sample

        with pytest.raises(TypeError):
            pipeline.add_stages(sample)
示例14: test_pipeline_stage_addition
# 需要導入模塊: from radical.entk import Pipeline [as 別名]
# 或者: from radical.entk.Pipeline import add_stages [as 別名]
def test_pipeline_stage_addition():
    """add_stages() with a list must store the stages, in order, and set
    the stage counters accordingly."""
    pipeline = Pipeline()

    first = Stage()
    second = Stage()
    for stage in (first, second):
        task = Task()
        task.executable = ['/bin/date']
        stage.tasks = task

    pipeline.add_stages([first, second])

    assert type(pipeline.stages) == list
    assert pipeline._stage_count == 2
    assert pipeline._cur_stage == 1
    assert pipeline.stages[0] == first
    assert pipeline.stages[1] == second
示例15: create_pipeline
# 需要導入模塊: from radical.entk import Pipeline [as 別名]
# 或者: from radical.entk.Pipeline import add_stages [as 別名]
def create_pipeline():
    """Return a one-stage pipeline with a single '/bin/echo hello' task."""
    task = Task()
    task.name = 'simulation'
    task.executable = ['/bin/echo']
    task.arguments = ['hello']
    task.copy_input_data = []
    task.copy_output_data = []

    stage = Stage()
    stage.add_tasks(task)

    pipeline = Pipeline()
    pipeline.add_stages(stage)

    return pipeline