This page collects typical usage examples of the Python method pypet.Environment.run. If you are unsure what Environment.run does, how to call it, or simply want to see it in context, the curated code examples below may help. You can also follow up on the containing class, pypet.Environment, for further details.
Below are 13 code examples of Environment.run, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
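For orientation, all examples below follow the same basic pattern: create an Environment, fetch its trajectory, add and explore parameters, and hand a top-level run function to Environment.run. The following minimal sketch illustrates that pattern; the parameter names and the multiply function are illustrative placeholders, not taken from any particular example below.

from pypet import Environment, cartesian_product

def multiply(traj):
    # Every top-level run function receives the trajectory as its first argument
    # and typically stores its output with f_add_result.
    traj.f_add_result('z', traj.x * traj.y, comment='Product of x and y')

env = Environment(trajectory='sketch', filename='sketch.hdf5', overwrite_file=True)
traj = env.trajectory
traj.f_add_parameter('x', 1.0, comment='First dimension')
traj.f_add_parameter('y', 1.0, comment='Second dimension')
traj.f_explore(cartesian_product({'x': [1.0, 2.0], 'y': [3.0, 4.0]}))
env.run(multiply)        # multiply is executed once per explored parameter combination
env.disable_logging()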
Example 1: main
# Required module import: from pypet import Environment [as alias]
# Or: from pypet.Environment import run [as alias]
def main():
    batch = get_batch()
    filename = 'saga_%s.hdf5' % str(batch)
    env = Environment(trajectory='Example_22_Euler_Integration_%s' % str(batch),
                      filename=filename,
                      file_title='Example_22_Euler_Integration',
                      comment='Go for Euler!',
                      overwrite_file=True,
                      multiproc=True,  # Yes, we can use multiprocessing within each batch!
                      ncores=4)
    traj = env.trajectory
    trajectory_name = traj.v_name
    # 1st a) phase: parameter addition
    add_parameters(traj)
    # 1st b) phase: preparation
    # We will add the differential equation (well, its source code only) as a derived parameter
    traj.f_add_derived_parameter(FunctionParameter, 'diff_eq', diff_lorenz,
                                 comment='Source code of our equation!')
    # Explore the trajectory
    explore_batch(traj, batch)
    # 2nd phase: let's run the experiment
    # We pass `euler_scheme` as our top-level simulation function and
    # the Lorenz equation `diff_lorenz` as an additional argument
    env.run(euler_scheme, diff_lorenz)
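The functions euler_scheme, diff_lorenz, add_parameters and explore_batch live elsewhere in the example module and are not shown on this page. As a rough, hypothetical sketch of the shape of the run function (parameter names such as steps, dt and initial_conditions are assumptions, not confirmed by the snippet above):

import numpy as np

def euler_scheme(traj, diff_eq):
    # pypet passes the trajectory first; extra positional arguments given to
    # env.run() (here the differential equation) follow after it.
    y = np.asarray(traj.initial_conditions, dtype=float)   # assumed parameter
    orbit = [y]
    for _ in range(traj.steps):                            # assumed parameter
        y = y + traj.dt * diff_eq(y)                       # assumed diff_eq signature
        orbit.append(y)
    traj.f_add_result('euler_evolution', data=np.array(orbit),
                      comment='Integrated orbit (illustrative sketch)')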
Example 2: main
# Required module import: from pypet import Environment [as alias]
# Or: from pypet.Environment import run [as alias]
def main():
    filename = os.path.join('hdf5', 'FiringRate.hdf5')
    env = Environment(trajectory='FiringRate',
                      comment='Experiment to measure the firing rate '
                              'of a leaky integrate and fire neuron. '
                              'Exploring different input currents, '
                              'as well as refractory periods',
                      add_time=False,  # We don't want to add the current time to the name
                      log_stdout=True,
                      log_config='DEFAULT',
                      multiproc=True,
                      ncores=2,  # My laptop has 2 cores ;-)
                      wrap_mode='QUEUE',
                      filename=filename,
                      overwrite_file=True)
    traj = env.trajectory
    # Add parameters
    add_parameters(traj)
    # Let's explore
    add_exploration(traj)
    # Add the post-processing function
    env.add_postprocessing(neuron_postproc)
    # Run the experiment
    env.run(run_neuron)
    # Finally disable logging and close all log-files
    env.disable_logging()
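The run function run_neuron and the post-processing callback neuron_postproc come from the example's support module. A hedged sketch of what such a callback could look like, assuming pypet's convention that a post-processing function receives the trajectory and a list of (run index, returned value) pairs collected from the individual runs:

def neuron_postproc(traj, result_list):
    # Aggregate whatever each run_neuron call returned (purely illustrative).
    rates = dict(result_list)
    traj.f_add_result('summary.firing_rates', rates,
                      comment='Aggregated per-run results (illustrative sketch)')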
Example 3: main
# Required module import: from pypet import Environment [as alias]
# Or: from pypet.Environment import run [as alias]
def main():
    """Main *boilerplate* function to start the simulation."""
    # Now let's make use of logging
    logger = logging.getLogger()
    # Create folders for data and plots
    folder = os.path.join(os.getcwd(), 'experiments', 'ca_patterns_pypet')
    if not os.path.isdir(folder):
        os.makedirs(folder)
    filename = os.path.join(folder, 'all_patterns.hdf5')
    # Create an environment
    env = Environment(trajectory='cellular_automata',
                      multiproc=True,
                      ncores=4,
                      wrap_mode='QUEUE',
                      filename=filename,
                      overwrite_file=True)
    # Extract the trajectory
    traj = env.traj
    traj.v_lazy_adding = True
    traj.par.ncells = 400, 'Number of cells'
    traj.par.steps = 250, 'Number of timesteps'
    traj.par.rule_number = 30, 'The ca rule'
    traj.par.initial_name = 'random', 'The type of initial state'
    traj.par.seed = 100042, 'RNG Seed'
    # Explore
    exp_dict = {'rule_number': [10, 30, 90, 110, 184],
                'initial_name': ['single', 'random']}
    # # You can uncomment the ``exp_dict`` below to see that changing the
    # # exploration scheme is now really easy:
    # exp_dict = {'rule_number': [10, 30, 90, 110, 184],
    #             'ncells': [100, 200, 300],
    #             'seed': [333444555, 123456]}
    exp_dict = cartesian_product(exp_dict)
    traj.f_explore(exp_dict)
    # Run the simulation
    logger.info('Starting Simulation')
    env.run(wrap_automaton)
    # Load all data
    traj.f_load(load_data=2)
    logger.info('Printing data')
    for idx, run_name in enumerate(traj.f_iter_runs()):
        # Plot all patterns
        filename = os.path.join(folder, make_filename(traj))
        plot_pattern(traj.crun.pattern, traj.rule_number, filename)
        progressbar(idx, len(traj), logger=logger)
    # Finally disable logging and close all log-files
    env.disable_logging()
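Note that the lazy-adding block above (v_lazy_adding plus tuple assignment) is only a shorthand; with lazy adding disabled, the same parameters would be added with explicit calls, roughly like this:

traj.f_add_parameter('ncells', 400, comment='Number of cells')
traj.f_add_parameter('steps', 250, comment='Number of timesteps')
traj.f_add_parameter('rule_number', 30, comment='The ca rule')
traj.f_add_parameter('initial_name', 'random', comment='The type of initial state')
traj.f_add_parameter('seed', 100042, comment='RNG Seed')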
Example 4: main
# Required module import: from pypet import Environment [as alias]
# Or: from pypet.Environment import run [as alias]
def main():
    """Main function to protect the *entry point* of the program.
    If you want to use multiprocessing under Windows you need to wrap your
    main code creating an environment into a function. Otherwise
    the newly started child processes will re-execute the code and throw
    errors (also see https://docs.python.org/2/library/multiprocessing.html#windows).
    """
    # Create an environment that handles running.
    # Let's enable multiprocessing with 4 workers.
    filename = os.path.join('hdf5', 'example_04.hdf5')
    env = Environment(trajectory='Example_04_MP',
                      filename=filename,
                      file_title='Example_04_MP',
                      log_stdout=True,
                      comment='Multiprocessing example!',
                      multiproc=True,
                      ncores=4,
                      use_pool=True,  # Our runs are inexpensive, so we can get rid of overhead
                                      # by using a pool
                      freeze_input=True,  # We can avoid some
                                          # overhead by freezing the input to the pool
                      wrap_mode=pypetconstants.WRAP_MODE_QUEUE,
                      graceful_exit=True,  # We want to exit in a data-friendly way
                                           # that saves all results after hitting CTRL+C, try it ;-)
                      overwrite_file=True)
    # Get the trajectory from the environment
    traj = env.trajectory
    # Add both parameters
    traj.f_add_parameter('x', 1.0, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1.0, comment='I am the second dimension!')
    # Explore the parameters with a cartesian product, but we want to explore a bit more
    traj.f_explore(cartesian_product({'x': [float(x) for x in range(20)],
                                      'y': [float(y) for y in range(20)]}))
    # Run the simulation
    env.run(multiply)
    # Finally disable logging and close all log-files
    env.disable_logging()
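As the docstring explains, such a script should only create the environment inside main() and call it from a guarded entry point, so that child processes spawned for multiprocessing do not re-execute the module body. The guard itself is not shown in the snippet above; it is the usual one:

if __name__ == '__main__':
    main()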
Example 5: main
# Required module import: from pypet import Environment [as alias]
# Or: from pypet.Environment import run [as alias]
def main():
    """Main function to protect the *entry point* of the program.
    If you want to use multiprocessing with SCOOP you need to wrap your
    main code creating an environment into a function. Otherwise
    the newly started child processes will re-execute the code and throw
    errors (also see http://scoop.readthedocs.org/en/latest/usage.html#pitfalls).
    """
    # Create an environment that handles running.
    # Let's enable multiprocessing with SCOOP:
    filename = os.path.join('hdf5', 'example_21.hdf5')
    env = Environment(trajectory='Example_21_SCOOP',
                      filename=filename,
                      file_title='Example_21_SCOOP',
                      log_stdout=True,
                      comment='Multiprocessing example using SCOOP!',
                      multiproc=True,
                      freeze_input=True,  # We want to save overhead and freeze the input
                      use_scoop=True,  # Yes, we want SCOOP!
                      wrap_mode=pypetconstants.WRAP_MODE_LOCAL,  # SCOOP only works with 'LOCAL'
                                                                 # or 'NETLOCK' wrapping
                      overwrite_file=True)
    # Get the trajectory from the environment
    traj = env.trajectory
    # Add both parameters
    traj.f_add_parameter('x', 1.0, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1.0, comment='I am the second dimension!')
    # Explore the parameters with a cartesian product, but we want to explore a bit more
    traj.f_explore(cartesian_product({'x': [float(x) for x in range(20)],
                                      'y': [float(y) for y in range(20)]}))
    # Run the simulation
    env.run(multiply)
    # Let's check that all runs are completed!
    assert traj.f_is_completed()
    # Finally disable logging and close all log-files
    env.disable_logging()
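One practical note not shown in the snippet: a SCOOP-parallelised script is usually not started as a plain Python process. Assuming the file were saved as `example_21.py`, it would typically be launched with `python -m scoop example_21.py`, so that SCOOP can start and coordinate its worker processes.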
Example 6: main
# Required module import: from pypet import Environment [as alias]
# Or: from pypet.Environment import run [as alias]
def main():
    # Create an environment that handles running
    filename = os.path.join('hdf5', 'example_18.hdf5')
    env = Environment(trajectory='Multiplication',
                      filename=filename,
                      file_title='Example_18_Many_Runs',
                      overwrite_file=True,
                      comment='Contains many runs',
                      multiproc=True,
                      use_pool=True,
                      freeze_input=True,
                      ncores=2,
                      wrap_mode='QUEUE')
    # The environment has created a trajectory container for us
    traj = env.trajectory
    # Add both parameters
    traj.f_add_parameter('x', 1, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1, comment='I am the second dimension!')
    # Explore the parameters with a cartesian product, yielding 2500 runs
    traj.f_explore(cartesian_product({'x': range(50), 'y': range(50)}))
    # Run the simulation
    env.run(multiply)
    # Disable logging
    env.disable_logging()
    # Turn auto-loading on, since results have not been loaded yet
    traj.v_auto_load = True
    # Use the `v_idx` functionality
    traj.v_idx = 2042
    print('The result of run %d is: ' % traj.v_idx)
    # Now we can rely on the wildcards
    print(traj.res.crunset.crun.z)
    traj.v_idx = -1
    # Or we can use the shortcuts `rts_X` (run to set) and `r_X` to get particular results
    print('The result of run %d is: ' % 2044)
    print(traj.res.rts_2044.r_2044.z)
Example 7: main
# Required module import: from pypet import Environment [as alias]
# Or: from pypet.Environment import run [as alias]
def main(inputargs):
    args = docopt(__doc__, argv=inputargs)
    wavpath = path.join(modulePath, "resources", "tone_in_noise")
    stimuli = [path.join(wavpath, i) for i in glob.glob(path.join(wavpath, "*.wav"))]
    outfile = path.realpath(path.expanduser(args["--out"]))
    env = Environment(trajectory='tone-in-noise',
                      filename=outfile,
                      overwrite_file=True,
                      file_title="Tone in noise at different SNR",
                      comment="some comment",
                      large_overview_tables="False",
                      # freeze_input=True,
                      # use_pool=True,
                      multiproc=True,
                      ncores=3,
                      graceful_exit=True,
                      # wrap_mode=pypetconstants.WRAP_MODE_QUEUE,
                      )
    traj = env.trajectory
    traj.f_add_parameter('periphery', 'verhulst', comment="which periphery was used")
    traj.f_add_parameter('brainstem', 'nelsoncarney04', comment="which brainstem model was used")
    traj.f_add_parameter('weighting', "--no-cf-weighting ", comment="weighted CFs")
    traj.f_add_parameter('wavfile', '', comment="Which wav file to run")
    traj.f_add_parameter('level', 80, comment="stimulus level, SPL")
    traj.f_add_parameter('neuropathy', "none", comment="")
    parameter_dict = {
        "periphery":  ['verhulst', 'zilany'],
        "brainstem":  ['nelsoncarney04', 'carney2015'],
        "weighting":  [cf_weighting, ""],
        "wavfile":    stimuli,
        "level":      [80],
        "neuropathy": ["none", "moderate", "severe", "ls-moderate", "ls-severe"]
    }
    traj.f_explore(cartesian_product(parameter_dict))
    env.run(tone_in_noise)
    return 0
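The simulation function tone_in_noise is defined elsewhere in that repository. Schematically it only needs to accept the trajectory and read the explored parameters for the current run; a purely illustrative stub (the real function runs the periphery and brainstem models and stores their outputs):

def tone_in_noise(traj):
    # Illustrative only: read the explored parameters of the current run.
    print('Running %s + %s on %s at %d dB SPL (%s neuropathy)'
          % (traj.periphery, traj.brainstem, traj.wavfile, traj.level, traj.neuropathy))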
Example 8: main
# Required module import: from pypet import Environment [as alias]
# Or: from pypet.Environment import run [as alias]
def main():
    # Create an environment that handles running
    filename = os.path.join('hdf5', 'example_12.hdf5')
    env = Environment(trajectory='Multiplication',
                      filename=filename,
                      file_title='Example_12_Sharing_Data',
                      overwrite_file=True,
                      comment='The first example!',
                      continuable=False,  # We have shared data in terms of a multiprocessing list,
                                          # so we CANNOT use the continue feature.
                      multiproc=True,
                      ncores=2)
    # The environment has created a trajectory container for us
    traj = env.trajectory
    # Add both parameters
    traj.f_add_parameter('x', 1, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1, comment='I am the second dimension!')
    # Explore the parameters with a cartesian product
    traj.f_explore(cartesian_product({'x': [1, 2, 3, 4], 'y': [6, 7, 8]}))
    # We want a shared list where we can put all our results in. We use a manager for this:
    result_list = mp.Manager().list()
    # Let's make some space for potential results
    result_list[:] = [0 for _dummy in range(len(traj))]
    # Run the simulation
    env.run(multiply, result_list)
    # Now we want to store the final list as a numpy array
    traj.f_add_result('z', np.array(result_list))
    # Finally let's print the result to see that it worked
    print(traj.z)
    # Disable logging and close all log-files
    env.disable_logging()
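Because result_list is passed to env.run as an extra argument, the run function has to accept it after the trajectory. A hedged sketch of that variant of multiply, assuming traj.v_idx holds the index of the current run (which is why the list is pre-sized to len(traj) above):

def multiply(traj, result_list):
    z = traj.x * traj.y
    # Each run writes into its own slot of the shared manager list.
    result_list[traj.v_idx] = z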
Example 9: print
# Required module import: from pypet import Environment [as alias]
# Or: from pypet.Environment import run [as alias]
# The environment has created a trajectory container for us
traj = env.trajectory
# Add both parameters
traj.v_lazy_adding = True
traj.par.x = 1, 'I am the first dimension!'
traj.par.y = 1, 'I am the second dimension!'
# Explore just two points
traj.f_explore({'x': [3, 4]})
# So far everything was as in the first example. However, now we add links:
traj.f_add_link('mylink1', traj.f_get('x'))
# Note the `f_get` here to ensure we get the parameter instance, not the value 1.
# This allows us to access x differently:
print('x=' + str(traj.mylink1))
# We can also avoid fast access and recover the original parameter instance:
print(str(traj.f_get('mylink1')))
# Dotted (or colon) notation is allowed as well and creates new groups on the fly:
traj.f_add_link('parameters.mynewgroup.mylink2', traj.f_get('y'))
# And, of course, we can also use the links during a run:
env.run(multiply)
# Finally disable logging and close all log-files
env.disable_logging()
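Inside the run function the link behaves like an ordinary name, so a hypothetical variant of multiply could use it directly (a sketch, not the example's actual code):

def multiply(traj):
    # mylink1 points at parameter x, so this is equivalent to traj.x * traj.y.
    traj.f_add_result('z', traj.mylink1 * traj.y)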
Example 10
# Required module import: from pypet import Environment [as alias]
# Or: from pypet.Environment import run [as alias]
traj2 = env2.trajectory
# Add both parameters
traj1.f_add_parameter('x', 1.0, comment='I am the first dimension!')
traj1.f_add_parameter('y', 1.0, comment='I am the second dimension!')
traj2.f_add_parameter('x', 1.0, comment='I am the first dimension!')
traj2.f_add_parameter('y', 1.0, comment='I am the second dimension!')
# Explore the parameters with a cartesian product for the first trajectory:
traj1.f_explore(cartesian_product({'x': [1.0, 2.0, 3.0, 4.0], 'y': [6.0, 7.0, 8.0]}))
# Let's explore slightly differently for the second:
traj2.f_explore(cartesian_product({'x': [3.0, 4.0, 5.0, 6.0], 'y': [7.0, 8.0, 9.0]}))
# Run the simulations with all parameter combinations
env1.run(multiply)
env2.run(multiply)
# Now we merge them together into traj1.
# We want to remove duplicate entries,
# like the parameter space point x=3.0, y=7.0.
# Several points have been explored by both trajectories and we need them only once.
# Therefore, we set remove_duplicates=True (note this takes O(N1*N2)!).
# We also want to back up both trajectories, but we let the system choose the filename.
# Accordingly, we choose backup_filename=True instead of providing a filename.
# We want to move the hdf5 nodes from one trajectory to the other.
# Thus we set move_nodes=True.
# Finally, we want to delete the other trajectory afterwards since we already have a backup.
traj1.f_merge(traj2,
              remove_duplicates=True,
              backup_filename=True,
Example 11: main
# Required module import: from pypet import Environment [as alias]
# Or: from pypet.Environment import run [as alias]
def main():
    env = Environment(trajectory='postproc_deap',
                      overwrite_file=True,
                      log_stdout=False,
                      log_level=50,  # only display ERRORS
                      automatic_storing=True,  # Since we use post-processing, we
                      # can safely enable automatic storing, because everything will
                      # only be stored once at the very end of all runs.
                      comment='Using pypet and DEAP with less overhead'
                      )
    traj = env.traj
    # ------- Add parameters ------- #
    traj.f_add_parameter('popsize', 100, comment='Population size')
    traj.f_add_parameter('CXPB', 0.5, comment='Crossover term')
    traj.f_add_parameter('MUTPB', 0.2, comment='Mutation probability')
    traj.f_add_parameter('NGEN', 20, comment='Number of generations')
    traj.f_add_parameter('generation', 0, comment='Current generation')
    traj.f_add_parameter('ind_idx', 0, comment='Index of individual')
    traj.f_add_parameter('ind_len', 50, comment='Length of individual')
    traj.f_add_parameter('indpb', 0.005, comment='Mutation parameter')
    traj.f_add_parameter('tournsize', 3, comment='Selection parameter')
    traj.f_add_parameter('seed', 42, comment='Seed for RNG')
    # Placeholders for individuals and results that are about to be explored
    traj.f_add_derived_parameter('individual', [0 for x in range(traj.ind_len)],
                                 'An individual of the population')
    traj.f_add_result('fitnesses', [], comment='Fitnesses of all individuals')
    # ------- Create and register functions with DEAP ------- #
    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMax)
    toolbox = base.Toolbox()
    # Attribute generator
    toolbox.register("attr_bool", random.randint, 0, 1)
    # Structure initializers
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     toolbox.attr_bool, traj.ind_len)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    # Operator registering
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=traj.indpb)
    toolbox.register("select", tools.selTournament, tournsize=traj.tournsize)
    # ------- Initialize Population and Trajectory -------- #
    random.seed(traj.seed)
    pop = toolbox.population(n=traj.popsize)
    eval_pop = [ind for ind in pop if not ind.fitness.valid]
    traj.f_explore(cartesian_product({'generation': [0],
                                      'ind_idx': range(len(eval_pop)),
                                      'individual': [list(x) for x in eval_pop]},
                                     [('ind_idx', 'individual'), 'generation']))
    # ----------- Add post-processing ------------------ #
    postproc = Postprocessing(pop, eval_pop, toolbox)  # Add links to important structures
    env.add_postprocessing(postproc)
    # ------------ Run applying post-processing ---------- #
    env.run(eval_one_max)
    # ------------ Finished all runs; print the result --------------- #
    print("-- End of (successful) evolution --")
    best_ind = tools.selBest(pop, 1)[0]
    print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values))
Example 12: main
# Required module import: from pypet import Environment [as alias]
# Or: from pypet.Environment import run [as alias]
def main():
    filename = os.path.join('hdf5', 'example_06.hdf5')
    env = Environment(trajectory='Example_06_Euler_Integration',
                      filename=filename,
                      file_title='Example_06_Euler_Integration',
                      overwrite_file=True,
                      comment='Go for Euler!')
    traj = env.trajectory
    # 1st a) phase: parameter addition
    # Remember we have some control flow in the `add_parameters` function; the default parameter
    # set we choose is the `'diff_lorenz'` one, but we want to deviate from that and use
    # `'diff_roessler'`.
    # In order to do that we can preset the corresponding name parameter to change the
    # control flow:
    traj.f_preset_parameter('diff_name', 'diff_roessler')  # If you erase this line, you will get
                                                           # the Lorenz attractor again
    add_parameters(traj)
    # 1st b) phase: preparation
    # Let's check which function we want to use
    if traj.diff_name == 'diff_lorenz':
        diff_eq = diff_lorenz
    elif traj.diff_name == 'diff_roessler':
        diff_eq = diff_roessler
    else:
        raise ValueError('I don\'t know what %s is.' % traj.diff_name)
    # And add the source code of the function as a derived parameter.
    traj.f_add_derived_parameter(FunctionParameter, 'diff_eq', diff_eq,
                                 comment='Source code of our equation!')
    # We want to explore some initial conditions
    traj.f_explore({'initial_conditions': [
        np.array([0.01, 0.01, 0.01]),
        np.array([2.02, 0.02, 0.02]),
        np.array([42.0, 4.2, 0.42])
    ]})
    # 3 different conditions are enough for now
    # 2nd phase: let's run the experiment
    # We pass `euler_scheme` as our top-level simulation function and
    # the Roessler function as an additional argument
    env.run(euler_scheme, diff_eq)
    # Again no post-processing
    # 4th phase: analysis.
    # I would recommend doing the analysis completely independently from the simulation,
    # but for simplicity let's do it here.
    # We won't reload the trajectory this time but simply update the skeleton
    traj.f_load_skeleton()
    # For the fun of it, let's print the source code
    print('\n ---------- The source code of your function ---------- \n %s' % traj.diff_eq)
    # Let's get the exploration array:
    initial_conditions_exploration_array = traj.f_get('initial_conditions').f_get_range()
    # Now let's plot our simulated equations for the different initial conditions.
    # We will iterate through the run names
    for idx, run_name in enumerate(traj.f_get_run_names()):
        # Get the result of run idx from the trajectory
        euler_result = traj.results.f_get(run_name).euler_evolution
        # Now we manually need to load the result. Actually the results are not so large and we
        # could load them all at once, but for demonstration we do as if they were huge:
        traj.f_load_item(euler_result)
        euler_data = euler_result.data
        # Plot a fancy 3D plot
        fig = plt.figure(idx)
        ax = fig.gca(projection='3d')
        x = euler_data[:, 0]
        y = euler_data[:, 1]
        z = euler_data[:, 2]
        ax.plot(x, y, z, label='Initial Conditions: %s' % str(initial_conditions_exploration_array[idx]))
        plt.legend()
        plt.show()
        # Now we free the data again (because we assume it's huuuuuuge):
        del euler_data
        euler_result.f_empty()
    # Finally disable logging and close all log-files
    env.disable_logging()
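diff_roessler (and diff_lorenz) are small right-hand-side functions defined next to euler_scheme in the example module. Purely as an illustration of the kind of callable being passed around, a Roessler right-hand side could look like the following sketch; its signature and the parameter values are assumptions and may differ from the example's actual code:

import numpy as np

def diff_roessler(value_array, a=0.2, b=0.2, c=5.7):
    # Roessler system: dx = -y - z, dy = x + a*y, dz = b + z*(x - c)
    x, y, z = value_array
    return np.array([-y - z, x + a * y, b + z * (x - c)])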
Example 13: main
# Required module import: from pypet import Environment [as alias]
# Or: from pypet.Environment import run [as alias]
def main():
    filename = os.path.join('hdf5', 'example_05.hdf5')
    env = Environment(trajectory='Example_05_Euler_Integration',
                      filename=filename,
                      file_title='Example_05_Euler_Integration',
                      overwrite_file=True,
                      comment='Go for Euler!')
    traj = env.trajectory
    trajectory_name = traj.v_name
    # 1st a) phase: parameter addition
    add_parameters(traj)
    # 1st b) phase: preparation
    # We will add the differential equation (well, its source code only) as a derived parameter
    traj.f_add_derived_parameter(FunctionParameter, 'diff_eq', diff_lorenz,
                                 comment='Source code of our equation!')
    # We want to explore some initial conditions
    traj.f_explore({'initial_conditions': [
        np.array([0.01, 0.01, 0.01]),
        np.array([2.02, 0.02, 0.02]),
        np.array([42.0, 4.2, 0.42])
    ]})
    # 3 different conditions are enough for an illustrative example
    # 2nd phase: let's run the experiment
    # We pass `euler_scheme` as our top-level simulation function and
    # the Lorenz equation `diff_lorenz` as an additional argument
    env.run(euler_scheme, diff_lorenz)
    # We don't have a 3rd phase of post-processing here
    # 4th phase: analysis.
    # I would recommend doing post-processing completely independently from the simulation,
    # but for simplicity let's do it here.
    # Let's assume that we start all over again and load the entire trajectory anew.
    # Yet, there is an error within this approach, do you spot it?
    del traj
    traj = Trajectory(filename=filename)
    # We will only fully load parameters and derived parameters.
    # Results will be loaded manually later on.
    try:
        # However, this will fail because our trajectory does not know how to
        # build the FunctionParameter. You have seen this coming, right?
        traj.f_load(name=trajectory_name, load_parameters=2, load_derived_parameters=2,
                    load_results=1)
    except ImportError as e:
        print('That didn\'t work, I am sorry: %s ' % str(e))
    # Ok, let's try again, but this time adding our parameter to the imports
    traj = Trajectory(filename=filename,
                      dynamically_imported_classes=FunctionParameter)
    # Now it works:
    traj.f_load(name=trajectory_name, load_parameters=2, load_derived_parameters=2,
                load_results=1)
    # For the fun of it, let's print the source code
    print('\n ---------- The source code of your function ---------- \n %s' % traj.diff_eq)
    # Let's get the exploration array:
    initial_conditions_exploration_array = traj.f_get('initial_conditions').f_get_range()
    # Now let's plot our simulated equations for the different initial conditions:
    # We will iterate through the run names
    for idx, run_name in enumerate(traj.f_get_run_names()):
        # Get the result of run idx from the trajectory
        euler_result = traj.results.f_get(run_name).euler_evolution
        # Now we manually need to load the result. Actually the results are not so large and we
        # could load them all at once, but for demonstration we do as if they were huge:
        traj.f_load_item(euler_result)
        euler_data = euler_result.data
        # Plot a fancy 3D plot
        fig = plt.figure(idx)
        ax = fig.gca(projection='3d')
        x = euler_data[:, 0]
        y = euler_data[:, 1]
        z = euler_data[:, 2]
        ax.plot(x, y, z, label='Initial Conditions: %s' % str(initial_conditions_exploration_array[idx]))
        plt.legend()
        plt.show()
        # Now we free the data again (because we assume it's huuuuuuge):
        del euler_data
        euler_result.f_empty()
    # You have to click through the images to stop the example_05 module!
    # Finally disable logging and close all log-files
    env.disable_logging()