This article collects typical usage examples of the Stage class from Python's radical.entk package. If you are wondering what the Stage class does or how to use it, the curated examples below may help.
The following presents 15 code examples of the Stage class, ordered by popularity.
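Before the examples, a minimal sketch of the core Stage API used throughout this collection (the stage name, executable and arguments here are illustrative):

from radical.entk import Pipeline, Stage, Task

# A Stage groups Tasks that can run concurrently; Stages within a
# Pipeline execute in order. This sketch only wires objects together,
# it does not submit anything for execution.
p = Pipeline()
s = Stage()
s.name = 'example-stage'             # optional, useful in logs and profiles

t = Task()
t.executable = '/bin/echo'           # several examples below use the older list form
t.arguments = ['hello from a Stage']

s.add_tasks(t)                       # tasks are held in a set (see Example 10)
p.add_stages(s)                      # append the Stage to the Pipeline's sequence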
Example 1: test_wfp_check_processor
def test_wfp_check_processor():
p = Pipeline()
s = Stage()
t = Task()
t.executable = ['/bin/date']
s.add_tasks(t)
p.add_stages(s)
amgr = Amgr(hostname=hostname, port=port)
amgr._setup_mqs()
wfp = WFprocessor(sid=amgr._sid,
workflow=[p],
pending_queue=amgr._pending_queue,
completed_queue=amgr._completed_queue,
mq_hostname=amgr._mq_hostname,
port=amgr._port,
resubmit_failed=False)
wfp.start_processor()
assert wfp.check_processor()
wfp.terminate_processor()
assert not wfp.check_processor()
Example 2: generate_pipeline
def generate_pipeline(name, stages):
# Create a Pipeline object
p = Pipeline()
p.name = name
for s_cnt in range(stages):
# Create a Stage object
s = Stage()
        s.name = 'Stage %s' % s_cnt
for t_cnt in range(5):
# Create a Task object
t = Task()
t.name = 'my-task' # Assign a name to the task (optional)
t.executable = '/bin/echo' # Assign executable to the task
# Assign arguments for the task executable
t.arguments = ['I am task %s in %s in %s'%(t_cnt, s_cnt, name)]
# Add the Task to the Stage
s.add_tasks(t)
# Add Stage to the Pipeline
p.add_stages(s)
return p
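For reference, a hypothetical call to this helper that builds two three-stage pipelines, ready to be handed to an AppManager as in Example 5:

# Hypothetical usage of the helper above
pipelines = [generate_pipeline(name='Pipeline %s' % i, stages=3) for i in range(2)]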
Example 3: test_pipeline_decrement_stage
def test_pipeline_decrement_stage():
p = Pipeline()
s1 = Stage()
t = Task()
t.executable = ['/bin/date']
s1.tasks = t
s2 = Stage()
t = Task()
t.executable = ['/bin/date']
s2.tasks = t
p.add_stages([s1, s2])
p._increment_stage()
p._increment_stage()
assert p._stage_count == 2
assert p._cur_stage == 2
    assert p._completed_flag.is_set()
p._decrement_stage()
assert p._stage_count == 2
assert p._cur_stage == 1
    assert not p._completed_flag.is_set()
p._decrement_stage()
assert p._stage_count == 2
assert p._cur_stage == 0
    assert not p._completed_flag.is_set()
Example 4: test_stage_parent_pipeline_assignment
def test_stage_parent_pipeline_assignment(l, i, b):
    # l, i and b are a list, an integer and a boolean supplied by the test
    # harness; none of them is a Pipeline, so every assignment must raise
    s = Stage()
    data_type = [l, i, b]
    for data in data_type:
        with pytest.raises(TypeError):
            s.parent_pipeline = data
Example 5: test_stage_post_exec
def test_stage_post_exec():
global p1
p1.name = 'p1'
s = Stage()
s.name = 's1'
for t in range(NUM_TASKS):
s.add_tasks(create_single_task())
s.post_exec = condition
p1.add_stages(s)
res_dict = {
'resource': 'local.localhost',
'walltime': 30,
'cpus': 1,
}
os.environ['RADICAL_PILOT_DBURL'] = MLAB
appman = AppManager(rts='radical.pilot', hostname=hostname, port=port)
appman.resource_desc = res_dict
appman.workflow = [p1]
appman.run()
Example 6: test_state_order
def test_state_order():
"""
**Purpose**: Test if the Pipeline, Stage and Task are assigned their states in the correct order
"""
def create_single_task():
t1 = Task()
t1.name = 'simulation'
t1.executable = ['/bin/date']
t1.copy_input_data = []
t1.copy_output_data = []
return t1
p1 = Pipeline()
p1.name = 'p1'
s = Stage()
s.name = 's1'
    s.tasks = create_single_task()
    s.add_tasks(create_single_task())  # the stage now holds two distinct tasks
p1.add_stages(s)
res_dict = {
'resource': 'local.localhost',
'walltime': 5,
'cpus': 1,
'project': ''
}
os.environ['RADICAL_PILOT_DBURL'] = MLAB
os.environ['RP_ENABLE_OLD_DEFINES'] = 'True'
appman = Amgr(hostname=hostname, port=port)
appman.resource_desc = res_dict
appman.workflow = [p1]
appman.run()
p_state_hist = p1.state_history
assert p_state_hist == ['DESCRIBED', 'SCHEDULING', 'DONE']
s_state_hist = p1.stages[0].state_history
assert s_state_hist == ['DESCRIBED', 'SCHEDULING', 'SCHEDULED', 'DONE']
tasks = p1.stages[0].tasks
for t in tasks:
t_state_hist = t.state_history
assert t_state_hist == ['DESCRIBED', 'SCHEDULING', 'SCHEDULED', 'SUBMITTING', 'SUBMITTED',
'EXECUTED', 'DEQUEUEING', 'DEQUEUED', 'DONE']
Example 7: test_stage_to_dict
def test_stage_to_dict():
s = Stage()
d = s.to_dict()
assert d == {'uid': None,
'name': None,
'state': states.INITIAL,
'state_history': [states.INITIAL],
'parent_pipeline': {'uid': None, 'name': None}}
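The dictionary form above is what EnTK exchanges over its message queues (Example 8 serializes tasks the same way with to_dict before publishing them). Assuming the complementary from_dict method on the same class, a round-trip sketch looks like:

import json
from radical.entk import Stage

s = Stage()
wire = json.dumps(s.to_dict())        # serialize, e.g. to publish on a queue

s2 = Stage()
s2.from_dict(json.loads(wire))        # rebuild the state on the receiving side
assert s2.to_dict() == s.to_dict()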
Example 8: test_wfp_workflow_incomplete
def test_wfp_workflow_incomplete():
p = Pipeline()
s = Stage()
t = Task()
t.executable = ['/bin/date']
s.add_tasks(t)
p.add_stages(s)
amgr = Amgr(hostname=hostname, port=port)
amgr._setup_mqs()
wfp = WFprocessor(sid=amgr._sid,
workflow=[p],
pending_queue=amgr._pending_queue,
completed_queue=amgr._completed_queue,
mq_hostname=amgr._mq_hostname,
port=amgr._port,
resubmit_failed=False)
wfp._initialize_workflow()
assert wfp.workflow_incomplete()
amgr.workflow = [p]
profiler = ru.Profiler(name='radical.entk.temp')
    # Mark the pipeline and its first stage as in-flight so the
    # synchronizer can process the completed tasks
    p.stages[0].state = states.SCHEDULING
    p.state = states.SCHEDULED
    import json
    import pika
    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=amgr._mq_hostname, port=amgr._port))
    mq_channel = mq_connection.channel()
    for t in p.stages[0].tasks:
        t.state = states.COMPLETED
        task_as_dict = json.dumps(t.to_dict())
        mq_channel.basic_publish(exchange='',
                                 routing_key='%s-completedq-1' % amgr._sid,
                                 body=task_as_dict)
amgr._terminate_sync = Event()
sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
sync_thread.start()
proc = Process(target=func_for_dequeue_test, name='temp-proc', args=(wfp,))
proc.start()
proc.join()
amgr._terminate_sync.set()
sync_thread.join()
assert not wfp.workflow_incomplete()
Example 9: test_pipeline_stage_assignment
def test_pipeline_stage_assignment():
p = Pipeline()
s = Stage()
t = Task()
t.executable = ['/bin/date']
s.tasks = t
p.stages = s
assert type(p.stages) == list
assert p._stage_count == 1
assert p._cur_stage == 1
assert p.stages[0] == s
Example 10: test_stage_task_assignment
def test_stage_task_assignment():
"""
    **Purpose**: Test if necessary attributes are automatically updated upon task assignment
"""
s = Stage()
t = Task()
t.executable = ['/bin/date']
s.tasks = t
assert type(s.tasks) == set
assert s._task_count == 1
assert t in s.tasks
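Because tasks are held in a set, assigning or adding the same Task object twice does not grow the stage. A short sketch of that de-duplication, under the same set semantics shown above:

from radical.entk import Stage, Task

s = Stage()
t = Task()
t.executable = ['/bin/date']
s.tasks = t
s.add_tasks(t)              # same object again: the set absorbs it
assert s._task_count == 1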
Example 11: test_amgr_synchronizer
def test_amgr_synchronizer():
logger = ru.Logger('radical.entk.temp_logger')
profiler = ru.Profiler(name='radical.entk.temp')
amgr = Amgr(hostname=hostname, port=port)
amgr._setup_mqs()
p = Pipeline()
s = Stage()
# Create and add 100 tasks to the stage
for cnt in range(100):
t = Task()
t.executable = ['some-executable-%s' % cnt]
s.add_tasks(t)
p.add_stages(s)
p._assign_uid(amgr._sid)
p._validate()
amgr.workflow = [p]
for t in p.stages[0].tasks:
assert t.state == states.INITIAL
assert p.stages[0].state == states.INITIAL
assert p.state == states.INITIAL
# Start the synchronizer method in a thread
amgr._terminate_sync = Event()
sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
sync_thread.start()
    # Run the test function in a separate process
proc = Process(target=func_for_synchronizer_test, name='temp-proc',
args=(amgr._sid, p, logger, profiler))
proc.start()
proc.join()
for t in p.stages[0].tasks:
assert t.state == states.SCHEDULING
assert p.stages[0].state == states.SCHEDULING
assert p.state == states.SCHEDULING
amgr._terminate_sync.set()
sync_thread.join()
Example 12: generate_pipeline
def generate_pipeline():
def func_condition():
global CUR_NEW_STAGE, MAX_NEW_STAGE
if CUR_NEW_STAGE <= MAX_NEW_STAGE:
return True
return False
    def func_on_true():
        global CUR_NEW_STAGE
        CUR_NEW_STAGE += 1
        # Note: p.stages[CUR_NEW_STAGE:] is a sliced copy, so shuffling it
        # leaves the pipeline's own stage order unchanged
        shuffle(p.stages[CUR_NEW_STAGE:])
def func_on_false():
        print('Done')
# Create a Pipeline object
p = Pipeline()
for s in range(MAX_NEW_STAGE+1):
# Create a Stage object
s1 = Stage()
for i in range(CUR_TASKS):
t1 = Task()
t1.executable = '/bin/sleep'
            t1.arguments = ['30']
# Add the Task to the Stage
s1.add_tasks(t1)
# Add post-exec to the Stage
        s1.post_exec = {
            'condition': func_condition,
            'on_true': func_on_true,
            'on_false': func_on_false
        }
# Add Stage to the Pipeline
p.add_stages(s1)
return p
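Note that this collection shows two forms of post_exec: the dict form above, with 'condition', 'on_true' and 'on_false' keys, and the plain callable assigned in Examples 5 and 15; which form is accepted depends on the EnTK version in use.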
Example 13: generate_pipeline
def generate_pipeline():
# Create a Pipeline object
p = Pipeline()
# Create a Stage object
s1 = Stage()
# Create a Task object which creates a file named 'output.txt' of size 1 MB
t1 = Task()
t1.executable = '/bin/bash'
t1.arguments = ['-l', '-c', 'base64 /dev/urandom | head -c 1000000 > output.txt']
# Add the Task to the Stage
s1.add_tasks(t1)
# Add Stage to the Pipeline
p.add_stages(s1)
# Create another Stage object to hold character count tasks
s2 = Stage()
# Create a Task object
t2 = Task()
t2.executable = '/bin/bash'
t2.arguments = ['-l', '-c', 'grep -o . output.txt | sort | uniq -c > ccount.txt']
# Copy data from the task in the first stage to the current task's location
    t2.copy_input_data = ['$Pipeline_%s_Stage_%s_Task_%s/output.txt' % (p.uid, s1.uid, t1.uid)]
# Add the Task to the Stage
s2.add_tasks(t2)
# Add Stage to the Pipeline
p.add_stages(s2)
# Create another Stage object to hold checksum tasks
s3 = Stage()
# Create a Task object
t3 = Task()
t3.executable = '/bin/bash'
t3.arguments = ['-l', '-c', 'sha1sum ccount.txt > chksum.txt']
    # Copy data from the task in the previous stage to the current task's location
    t3.copy_input_data = ['$Pipeline_%s_Stage_%s_Task_%s/ccount.txt' % (p.uid, s2.uid, t2.uid)]
    # Download the output of the current task to the current location
    # ('cnt' is expected to be defined by the enclosing script, e.g. an iteration counter)
    t3.download_output_data = ['chksum.txt > chksum_%s.txt' % cnt]
# Add the Task to the Stage
s3.add_tasks(t3)
# Add Stage to the Pipeline
p.add_stages(s3)
return p
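The $Pipeline_%s_Stage_%s_Task_%s strings in copy_input_data are EnTK placeholders: at runtime they resolve to the sandbox directory of the referenced task, which is how t2 and t3 pick up output files produced by tasks in earlier stages.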
Example 14: test_stage_assign_uid
def test_stage_assign_uid():
s = Stage()
try:
import glob
import shutil
import os
home = os.environ.get('HOME','/home')
test_fold = glob.glob('%s/.radical/utils/test*'%home)
for f in test_fold:
shutil.rmtree(f)
    except OSError:
        # stale test directories may be absent or unremovable; either is fine
        pass
s._assign_uid('test')
assert s.uid == 'stage.0000'
Example 15: on_true
def on_true():
global NUM_TASKS, CUR_STAGE
NUM_TASKS *= 2
s = Stage()
s.name = 's%s'%CUR_STAGE
for t in range(NUM_TASKS):
s.add_tasks(create_single_task())
s.post_exec = condition
p1.add_stages(s)