This article collects typical usage examples of the Pipeline class from the Python package radical.entk. If you are wondering what the Pipeline class does, how to use it, or want to see it in real code, the curated examples below may help.
The following presents 15 code examples of the Pipeline class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
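Before the individual examples, here is a minimal, self-contained sketch of the typical Pipeline workflow, distilled from the patterns in the examples below. Treat it as an illustrative sketch rather than canonical usage: the RabbitMQ hostname/port and the resource description values are assumptions, not taken from this article.

from radical.entk import Pipeline, Stage, Task, AppManager

# Describe the workflow: a Pipeline contains Stages, a Stage contains Tasks
p = Pipeline()
s = Stage()
t = Task()
t.executable = '/bin/echo'
t.arguments = ['hello from EnTK']
s.add_tasks(t)
p.add_stages(s)

# Hand the workflow to an AppManager and execute it
# (the RabbitMQ hostname/port and the resource values below are assumed placeholders)
appman = AppManager(hostname='localhost', port=5672)
appman.resource_desc = {'resource': 'local.localhost',
                        'walltime': 10,
                        'cpus': 1}
appman.workflow = [p]
appman.run()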
Example 1: test_wfp_check_processor
def test_wfp_check_processor():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp.start_processor()
    assert wfp.check_processor()

    wfp.terminate_processor()
    assert not wfp.check_processor()
Example 2: generate_pipeline
def generate_pipeline(name, stages):

    # Create a Pipeline object
    p = Pipeline()
    p.name = name

    for s_cnt in range(stages):

        # Create a Stage object
        s = Stage()
        s.name = 'Stage %s' % s_cnt

        for t_cnt in range(5):

            # Create a Task object
            t = Task()
            t.name = 'my-task'          # Assign a name to the task (optional)
            t.executable = '/bin/echo'  # Assign executable to the task
            # Assign arguments for the task executable
            t.arguments = ['I am task %s in %s in %s' % (t_cnt, s_cnt, name)]

            # Add the Task to the Stage
            s.add_tasks(t)

        # Add Stage to the Pipeline
        p.add_stages(s)

    return p
Example 3: test_wfp_initialization
def test_wfp_initialization(s, i, b, l):

    p = Pipeline()
    st = Stage()
    t = Task()
    t.executable = ['/bin/date']
    st.add_tasks(t)
    p.add_stages(st)

    wfp = WFprocessor(sid='rp.session.local.0000',
                      workflow=set([p]),
                      pending_queue=['pending'],
                      completed_queue=['completed'],
                      mq_hostname=hostname,
                      port=port,
                      resubmit_failed=True)

    assert len(wfp._uid.split('.')) == 2
    assert 'wfprocessor' == wfp._uid.split('.')[0]
    assert wfp._pending_queue == ['pending']
    assert wfp._completed_queue == ['completed']
    assert wfp._mq_hostname == hostname
    assert wfp._port == port
    assert wfp._wfp_process == None
    assert wfp._workflow == set([p])

    # 'unicode' is a Python 2 builtin; this check targets the Python 2 era of these tests
    if not isinstance(s, unicode):

        wfp = WFprocessor(sid=s,
                          workflow=set([p]),
                          pending_queue=l,
                          completed_queue=l,
                          mq_hostname=s,
                          port=i,
                          resubmit_failed=b)
Example 4: test_state_order
def test_state_order():
    """
    **Purpose**: Test if the Pipeline, Stage and Task are assigned their states in the correct order
    """

    def create_single_task():

        t1 = Task()
        t1.name = 'simulation'
        t1.executable = ['/bin/date']
        t1.copy_input_data = []
        t1.copy_output_data = []

        return t1

    p1 = Pipeline()
    p1.name = 'p1'

    s = Stage()
    s.name = 's1'
    s.tasks = create_single_task()
    s.add_tasks(create_single_task())

    p1.add_stages(s)

    res_dict = {
        'resource': 'local.localhost',
        'walltime': 5,
        'cpus': 1,
        'project': ''
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB
    os.environ['RP_ENABLE_OLD_DEFINES'] = 'True'

    appman = Amgr(hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [p1]
    appman.run()

    p_state_hist = p1.state_history
    assert p_state_hist == ['DESCRIBED', 'SCHEDULING', 'DONE']

    s_state_hist = p1.stages[0].state_history
    assert s_state_hist == ['DESCRIBED', 'SCHEDULING', 'SCHEDULED', 'DONE']

    tasks = p1.stages[0].tasks

    for t in tasks:
        t_state_hist = t.state_history
        assert t_state_hist == ['DESCRIBED', 'SCHEDULING', 'SCHEDULED', 'SUBMITTING', 'SUBMITTED',
                                'EXECUTED', 'DEQUEUEING', 'DEQUEUED', 'DONE']
Example 5: test_pipeline_to_dict
def test_pipeline_to_dict():

    p = Pipeline()
    d = p.to_dict()

    assert d == {'uid': None,
                 'name': None,
                 'state': states.INITIAL,
                 'state_history': [states.INITIAL],
                 'completed': False}
Example 6: test_pipeline_decrement_stage
def test_pipeline_decrement_stage():

    p = Pipeline()

    s1 = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s1.tasks = t

    s2 = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s2.tasks = t

    p.add_stages([s1, s2])

    p._increment_stage()
    p._increment_stage()
    assert p._stage_count == 2
    assert p._cur_stage == 2
    assert p._completed_flag.is_set() == True

    p._decrement_stage()
    assert p._stage_count == 2
    assert p._cur_stage == 1
    assert p._completed_flag.is_set() == False

    p._decrement_stage()
    assert p._stage_count == 2
    assert p._cur_stage == 0
    assert p._completed_flag.is_set() == False
Example 7: test_wfp_workflow_incomplete
def test_wfp_workflow_incomplete():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()
    assert wfp.workflow_incomplete()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    p.stages[0].state == states.SCHEDULING
    p.state == states.SCHEDULED

    for t in p.stages[0].tasks:
        t.state = states.COMPLETED

        import json
        import pika

        task_as_dict = json.dumps(t.to_dict())
        mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=amgr._mq_hostname, port=amgr._port))
        mq_channel = mq_connection.channel()
        mq_channel.basic_publish(exchange='',
                                 routing_key='%s-completedq-1' % amgr._sid,
                                 body=task_as_dict)

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_dequeue_test, name='temp-proc', args=(wfp,))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    assert not wfp.workflow_incomplete()
Example 8: test_pipeline_stage_assignment
def test_pipeline_stage_assignment():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.tasks = t
    p.stages = s

    assert type(p.stages) == list
    assert p._stage_count == 1
    assert p._cur_stage == 1
    assert p.stages[0] == s
Example 9: generate_pipeline
def generate_pipeline():

    def func_condition():

        global CUR_NEW_STAGE, MAX_NEW_STAGE

        if CUR_NEW_STAGE <= MAX_NEW_STAGE:
            return True

        return False

    def func_on_true():

        global CUR_NEW_STAGE
        CUR_NEW_STAGE += 1

        shuffle(p.stages[CUR_NEW_STAGE:])

    def func_on_false():
        print('Done')

    # Create a Pipeline object
    p = Pipeline()

    for s in range(MAX_NEW_STAGE + 1):

        # Create a Stage object
        s1 = Stage()

        for i in range(CUR_TASKS):

            t1 = Task()
            t1.executable = '/bin/sleep'
            t1.arguments = ['30']

            # Add the Task to the Stage
            s1.add_tasks(t1)

        # Add post-exec to the Stage
        s1.post_exec = {
            'condition': func_condition,
            'on_true': func_on_true,
            'on_false': func_on_false
        }

        # Add Stage to the Pipeline
        p.add_stages(s1)

    return p
Example 10: test_pipeline_assign_uid
def test_pipeline_assign_uid():

    p = Pipeline()
    try:
        import glob
        import shutil
        import os
        home = os.environ.get('HOME', '/home')
        test_fold = glob.glob('%s/.radical/utils/test*' % home)
        for f in test_fold:
            shutil.rmtree(f)
    except:
        pass

    p._assign_uid('test')
    assert p.uid == 'pipeline.0000'
Example 11: create_pipeline
def create_pipeline():

    p = Pipeline()
    s = Stage()

    t1 = Task()
    t1.name = 'simulation'
    t1.executable = ['sleep']
    t1.arguments = ['10']

    s.add_tasks(t1)
    p.add_stages(s)

    return p
Example 12: test_pipeline_state_assignment
def test_pipeline_state_assignment(t, l, i, b):

    p = Pipeline()

    data_type = [l, i, b]

    for data in data_type:
        with pytest.raises(TypeError):
            p.state = data

    if isinstance(t, str):
        with pytest.raises(ValueError):
            p.state = t

    for val in states._pipeline_state_values.keys():
        p.state = val
Example 13: test_pipeline_from_dict
def test_pipeline_from_dict():

    d = {'uid': 're.Pipeline.0000',
         'name': 'p1',
         'state': states.DONE,
         'state_history': [states.INITIAL, states.DONE],
         'completed': True}

    p = Pipeline()
    p.from_dict(d)

    assert p.uid == d['uid']
    assert p.name == d['name']
    assert p.state == d['state']
    assert p.state_history == d['state_history']
    assert p.completed == d['completed']
Example 14: test_pipeline_validate_entities
def test_pipeline_validate_entities(t, l, i, b, se):

    p = Pipeline()

    data_type = [t, l, i, b, se]

    for data in data_type:
        with pytest.raises(TypeError):
            p._validate_entities(data)

    s = Stage()
    assert isinstance(p._validate_entities(s), list)

    s1 = Stage()
    s2 = Stage()
    assert [s1, s2] == p._validate_entities([s1, s2])
Example 15: test_wfp_enqueue
def test_wfp_enqueue():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_enqueue_test, name='temp-proc', args=(wfp,))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    for t in p.stages[0].tasks:
        assert t.state == states.SCHEDULED

    assert p.stages[0].state == states.SCHEDULED
    assert p.state == states.SCHEDULING