This page collects typical usage examples of the random_seed function from Python's mystic.tools module. If you have been wondering what exactly random_seed does, how to call it, or what real code that uses it looks like, the selected code examples here may help.
The 14 code examples of the random_seed function shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
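Before the individual examples, note the pattern they all share: random_seed is called before a stochastic solve so that repeated runs produce identical results. The sketch below is illustrative rather than taken from any example; the rosen cost function, the diffev2 one-liner, and the particular seed, bounds, and population values are assumed choices.
# minimal reproducibility sketch (illustrative; assumes mystic provides
# mystic.solvers.diffev2 and mystic.models.rosen, as recent releases do)
from mystic.tools import random_seed
from mystic.solvers import diffev2
from mystic.models import rosen

def reproducible_solve(seed=123):
    random_seed(seed)             # fix the global random state before solving
    bounds = [(-5., 5.)]*3        # random initial points are drawn from these bounds
    return diffev2(rosen, bounds, npop=20, gtol=50, disp=False, full_output=False)

if __name__ == '__main__':
    a = reproducible_solve()
    b = reproducible_solve()
    print("identical results: %s" % (list(a) == list(b)))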
Example 1: optimize
def optimize(cost,lb,ub):
from pathos.pools import ProcessPool as Pool
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
from mystic.strategy import Best1Exp
from mystic.monitors import VerboseMonitor, Monitor
from mystic.tools import random_seed
random_seed(123)
#stepmon = VerboseMonitor(100)
stepmon = Monitor()
evalmon = Monitor()
ndim = len(lb) # [(1 + RVend) - RVstart] + 1
solver = DifferentialEvolutionSolver2(ndim,npop)
solver.SetRandomInitialPoints(min=lb,max=ub)
solver.SetStrictRanges(min=lb,max=ub)
solver.SetEvaluationLimits(maxiter,maxfun)
solver.SetEvaluationMonitor(evalmon)
solver.SetGenerationMonitor(stepmon)
solver.SetMapper(Pool().map)
tol = convergence_tol
solver.Solve(cost,termination=CRT(tol,tol),strategy=Best1Exp, \
CrossProbability=crossover,ScalingFactor=percent_change)
print("solved: %s" % solver.bestSolution)
scale = 1.0
diameter_squared = -solver.bestEnergy / scale #XXX: scale != 0
func_evals = solver.evaluations
return diameter_squared, func_evals
Example 2: optimize
def optimize(cost, bounds, tolerance, _constraints):
(lb,ub) = bounds
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import VTR
from mystic.strategy import Best1Exp
from mystic.monitors import VerboseMonitor, Monitor
from mystic.tools import random_seed
if debug: random_seed(123)
evalmon = Monitor(); stepmon = Monitor()
if debug: stepmon = VerboseMonitor(10)
ndim = len(lb)
solver = DifferentialEvolutionSolver2(ndim,npop)
solver.SetRandomInitialPoints(min=lb,max=ub)
solver.SetStrictRanges(min=lb,max=ub)
solver.SetEvaluationLimits(maxiter,maxfun)
solver.SetEvaluationMonitor(evalmon)
solver.SetGenerationMonitor(stepmon)
solver.Solve(cost,termination=VTR(tolerance),strategy=Best1Exp, \
CrossProbability=crossover,ScalingFactor=percent_change, \
constraints = _constraints)
solved = solver.Solution()
diameter_squared = solver.bestEnergy
func_evals = len(evalmon)
return solved, diameter_squared, func_evals
Example 3: main
def main(self, *args, **kwds):
# general solver
# exception for DifferentialEvolutionSolver2
if self.inventory.solver == 'DifferentialEvolution2':
solvername = DifferentialEvolutionSolver2
else:
solvername = eval(self.inventory.solver + 'Solver')
# create the solver
try:
NP = self.mod.NP
solver = solvername(self.mod.ND, NP)
except:
solver = solvername(self.mod.ND)
costfunction = self.mod.cost
termination = self.mod.termination
from mystic.tools import random_seed
random_seed(123)
# set initial points
try:
solver.SetInitialPoints(self.mod.x0)
except:
solver.SetRandomInitialPoints(self.mod.min, self.mod.max)
# set maximum number of iterations
try:
maxiter = self.mod.maxiter
solver.SetEvaluationLimits(generations=maxiter)
except:
pass
# set bounds, if applicable
try:
min_bounds = self.mod.min_bounds
max_bounds = self.mod.max_bounds
solver.SetStrictRanges(min_bounds, max_bounds)
except:
pass
# additional arguments/kwds to the Solve() call
try:
solverkwds = self.mod.solverkwds
except:
solverkwds = {}
solver.Solve(costfunction, termination, **solverkwds)
self.solution = solver.Solution()
return
Example 4: __test2
def __test2():
# From branches/UQ/math/cut.py
from mystic.tools import random_seed
random_seed(123)
lower = [-60.0, -10.0, -50.0]
upper = [105.0, 30.0, 75.0]
def model(x):
x1,x2,x3 = x
if x1 > (x2 + x3): return x1*x2 - x3
return 0.0
failure,success = sample(model,lower,upper)
pof = float(failure) / float(failure + success)
print "PoF using method 1: %s" % pof
random_seed(123)
print "PoF using method 2: %s" % sampled_pof(model,lower,upper)
Example 5: optimize
def optimize(cost,_bounds,_constraints):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import ChangeOverGeneration as COG
from mystic.strategy import Best1Exp
from mystic.monitors import VerboseMonitor, Monitor
from mystic.tools import random_seed
from mystic.termination import Or, CollapseWeight, CollapsePosition, state
if debug:
random_seed(123) # or 666 to force impose_unweighted reweighting
stepmon = VerboseMonitor(1,1)
else:
stepmon = VerboseMonitor(10) if verbose else Monitor()
stepmon._npts = npts
evalmon = Monitor()
lb,ub = _bounds
ndim = len(lb)
solver = DifferentialEvolutionSolver2(ndim,npop)
solver.SetRandomInitialPoints(min=lb,max=ub)
solver.SetStrictRanges(min=lb,max=ub)
solver.SetEvaluationLimits(maxiter,maxfun)
solver.SetEvaluationMonitor(evalmon)
solver.SetGenerationMonitor(stepmon)
solver.SetConstraints(_constraints)
tol = convergence_tol
term = Or(COG(tol,ngen), CollapseWeight(), CollapsePosition())
solver.Solve(cost,termination=term,strategy=Best1Exp, disp=verbose, \
CrossProbability=crossover,ScalingFactor=percent_change)
#while collapse and solver.Collapse(verbose): #XXX: total_evaluations?
# if debug: print(state(solver._termination).keys())
# solver.Solve() #XXX: cost, term, strategy, cross, scale ?
# if debug: solver.SaveSolver('debug.pkl')
solved = solver.bestSolution
#print("solved: %s" % solver.Solution())
func_max = MINMAX * solver.bestEnergy #NOTE: -solution assumes -Max
#func_max = 1.0 + MINMAX*solver.bestEnergy #NOTE: 1-sol => 1-success = fail
func_evals = solver.evaluations
from mystic.munge import write_support_file
write_support_file(stepmon, npts=npts)
return solved, func_max, func_evals
Example 6: test2
def test2(monitor, diffenv=None):
if diffenv == True:
#from mystic.solvers import DifferentialEvolutionSolver as DE
from mystic.solvers import DifferentialEvolutionSolver2 as DE
elif diffenv == False:
from mystic.solvers import NelderMeadSimplexSolver as noDE
else:
from mystic.solvers import PowellDirectionalSolver as noDE
from mystic.termination import ChangeOverGeneration as COG
from mystic.tools import getch, random_seed
random_seed(123)
lb = [-100,-100,-100]
ub = [1000,1000,1000]
ndim = len(lb)
npop = 5
maxiter = 10
maxfun = 1e+6
convergence_tol = 1e-10; ngen = 100
crossover = 0.9
percent_change = 0.9
def cost(x):
ax,bx,c = x
return (ax)**2 - bx + c
if diffenv == True:
solver = DE(ndim,npop)
else:
solver = noDE(ndim)
solver.SetRandomInitialPoints(min=lb,max=ub)
solver.SetStrictRanges(min=lb,max=ub)
solver.SetEvaluationLimits(maxiter,maxfun)
solver.SetEvaluationMonitor(monitor)
#solver.SetGenerationMonitor(monitor)
tol = convergence_tol
solver.Solve(cost, termination=COG(tol,ngen))
solved = solver.Solution()
monitor.info("solved: %s" % solved)
func_max = -solver.bestEnergy
return solved, func_max
Example 7: optimize
def optimize(cost,lower,upper,nbins):
from mystic.tools import random_seed
from pyina.launchers import TorqueMpi as Pool
random_seed(123)
# generate arrays of points defining a grid in parameter space
grid_dimensions = len(lower)
bins = []
for i in range(grid_dimensions):
step = abs(upper[i] - lower[i])/nbins[i]
bins.append( [lower[i] + (j+0.5)*step for j in range(nbins[i])] )
# build a grid of starting points
from mystic.math.grid import gridpts
from pool_helper import local_optimize
from pool_helper import nnodes, queue, timelimit
initial_values = gridpts(bins)
# run optimizer for each grid point
lb = [lower for i in range(len(initial_values))]
ub = [upper for i in range(len(initial_values))]
cf = [cost for i in range(len(initial_values))]
# map:: params, energy, func_evals = local_optimize(cost,x0,lb,ub)
config = {'queue':queue, 'timelimit':timelimit}
results = Pool(nnodes, **config).map(local_optimize,cf,initial_values,lb,ub)
#print "results = %s" % results
# get the results with the lowest energy
best = list(results[0][0]), results[0][1]
func_evals = results[0][2]
for result in results[1:]:
func_evals += result[2] # add function evaluations
if result[1] < best[1]: # compare energy
best = list(result[0]), result[1]
# return best
print "solved: %s" % best[0]
scale = 1.0
diameter_squared = -best[1] / scale #XXX: scale != 0
return diameter_squared, func_evals
Example 8: test_penalize
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2016 California Institute of Technology.
# Copyright (c) 2016-2019 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/mystic/blob/master/LICENSE
from mystic.constraints import *
from mystic.penalty import quadratic_equality
from mystic.coupler import inner
from mystic.math import almostEqual
from mystic.tools import random_seed
random_seed(213)
def test_penalize():
from mystic.math.measures import mean, spread
def mean_constraint(x, target):
return mean(x) - target
def range_constraint(x, target):
return spread(x) - target
@quadratic_equality(condition=range_constraint, kwds={'target':5.0})
@quadratic_equality(condition=mean_constraint, kwds={'target':5.0})
def penalty(x):
return 0.0
def cost(x):
return abs(sum(x) - 5.0)
Example 9: print_solution
NP = 40
MAX_GENERATIONS = NP*NP
NNODES = NP//5
seed = 100
if __name__=='__main__':
def print_solution(func):
print(poly1d(func))
return
psow = VerboseMonitor(10)
ssow = VerboseMonitor(10)
random_seed(seed)
print "first sequential..."
solver = DifferentialEvolutionSolver2(ND,NP) #XXX: sequential
solver.SetRandomInitialPoints(min=[-100.0]*ND, max=[100.0]*ND)
solver.SetEvaluationLimits(generations=MAX_GENERATIONS)
solver.SetGenerationMonitor(ssow)
solver.Solve(ChebyshevCost, VTR(0.01), strategy=Best1Exp, \
CrossProbability=1.0, ScalingFactor=0.9, disp=1)
print ""
print_solution( solver.bestSolution )
#'''
random_seed(seed)
print "\n and now parallel..."
solver2 = DifferentialEvolutionSolver2(ND,NP) #XXX: parallel
solver2.SetMapper(Pool(NNODES).map)
Example 10: impose_expectation
def impose_expectation(param, f, npts, bounds=None, weights=None, **kwds):
"""impose a given expextation value (m +/- D) on a given function f.
Optimiziation on f over the given bounds seeks a mean 'm' with deviation 'D'.
(this function is not 'mean-, range-, or variance-preserving')
Inputs:
param -- a tuple of target parameters: param = (mean, deviation)
f -- a function that takes a list and returns a number
npts -- a tuple of dimensions of the target product measure
bounds -- a tuple of sample bounds: bounds = (lower_bounds, upper_bounds)
weights -- a list of sample weights
Additional Inputs:
constraints -- a function that takes a nested list of N x 1D discrete
measure positions and weights x' = constraints(x, w)
Outputs:
samples -- a list of sample positions
For example:
>>> # provide the dimensions and bounds
>>> nx = 3; ny = 2; nz = 1
>>> x_lb = [10.0]; y_lb = [0.0]; z_lb = [10.0]
>>> x_ub = [50.0]; y_ub = [9.0]; z_ub = [90.0]
>>>
>>> # prepare the bounds
>>> lb = (nx * x_lb) + (ny * y_lb) + (nz * z_lb)
>>> ub = (nx * x_ub) + (ny * y_ub) + (nz * z_ub)
>>>
>>> # generate a list of samples with mean +/- dev imposed
>>> mean = 2.0; dev = 0.01
>>> samples = impose_expectation((mean,dev), f, (nx,ny,nz), (lb,ub))
>>>
>>> # test the results by calculating the expectation value for the samples
>>> expectation(f, samples)
2.00001001012246015
"""
# param[0] is the target mean
# param[1] is the acceptable deviation from the target mean
# FIXME: the following is a HACK to recover from lost 'weights' information
# we 'mimic' discrete measures using the product measure weights
# plug in the 'constraints' function: samples' = constrain(samples, weights)
constrain = None # default is no constraints
if 'constraints' in kwds: constrain = kwds['constraints']
if not constrain: # if None (default), there are no constraints
constraints = lambda x: x
else: #XXX: better to use a standard "xk' = constrain(xk)" interface ?
def constraints(rv):
coords = _pack( _nested(rv,npts) )
coords = list(zip(*coords)) # 'mimic' a nested list
coords = constrain(coords, [weights for i in range(len(coords))])
coords = list(zip(*coords)) # revert back to a packed list
return _flat( _unpack(coords,npts) )
# construct cost function to reduce deviation from expectation value
def cost(rv):
"""compute cost from a 1-d array of model parameters,
where: cost = | E[model] - m |**2 """
# from mystic.math.measures import _pack, _nested, expectation
samples = _pack( _nested(rv,npts) )
Ex = expectation(f, samples, weights)
return (Ex - param[0])**2
# if bounds are not set, use the default optimizer bounds
if not bounds:
lower_bounds = []; upper_bounds = []
for n in npts:
lower_bounds += [None]*n
upper_bounds += [None]*n
else:
lower_bounds, upper_bounds = bounds
# construct and configure optimizer
debug = kwds['debug'] if 'debug' in kwds else False
npop = 200
maxiter = 1000; maxfun = 1e+6
crossover = 0.9; percent_change = 0.9
def optimize(cost, bounds, tolerance, _constraints):
(lb,ub) = bounds
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import VTR
from mystic.strategy import Best1Exp
from mystic.monitors import VerboseMonitor, Monitor
from mystic.tools import random_seed
if debug: random_seed(123)
evalmon = Monitor(); stepmon = Monitor()
if debug: stepmon = VerboseMonitor(10)
ndim = len(lb)
solver = DifferentialEvolutionSolver2(ndim,npop)
solver.SetRandomInitialPoints(min=lb,max=ub)
solver.SetStrictRanges(min=lb,max=ub)
solver.SetEvaluationLimits(maxiter,maxfun)
solver.SetEvaluationMonitor(evalmon)
solver.SetGenerationMonitor(stepmon)
solver.Solve(cost,termination=VTR(tolerance),strategy=Best1Exp, \
CrossProbability=crossover,ScalingFactor=percent_change, \
constraints = _constraints)
#......... the rest of this code has been omitted .........
Example 11: test_griewangk
def test_griewangk():
"""Test Griewangk's function, which has many local minima.
Testing Griewangk:
Expected: x=[0.]*10 and f=0
Using DifferentialEvolutionSolver:
Solution: [ 8.87516194e-09 7.26058147e-09 1.02076001e-08 1.54219038e-08
-1.54328461e-08 2.34589663e-08 2.02809360e-08 -1.36385836e-08
1.38670373e-08 1.59668900e-08]
f value: 0.0
Iterations: 4120
Function evaluations: 205669
Time elapsed: 34.4936850071 seconds
Using DifferentialEvolutionSolver2:
Solution: [ -2.02709316e-09 3.22017968e-09 1.55275472e-08 5.26739541e-09
-2.18490470e-08 3.73725584e-09 -1.02315312e-09 1.24680355e-08
-9.47898116e-09 2.22243557e-08]
f value: 0.0
Iterations: 4011
Function evaluations: 200215
Time elapsed: 32.8412370682 seconds
"""
print "Testing Griewangk:"
print "Expected: x=[0.]*10 and f=0"
from mystic.models import griewangk as costfunc
ndim = 10
lb = [-400.]*ndim
ub = [400.]*ndim
maxiter = 10000
seed = 123 # Re-seed for each solver to have them all start at same x0
# DifferentialEvolutionSolver
print "\nUsing DifferentialEvolutionSolver:"
npop = 50
random_seed(seed)
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration as COG
from mystic.termination import CandidateRelativeTolerance as CRT
from mystic.termination import VTR
from mystic.strategy import Rand1Bin, Best1Bin, Rand1Exp
esow = Monitor()
ssow = Monitor()
solver = DifferentialEvolutionSolver(ndim, npop)
solver.SetRandomInitialPoints(lb, ub)
solver.SetStrictRanges(lb, ub)
solver.SetEvaluationLimits(generations=maxiter)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(ssow)
solver.enable_signal_handler()
#term = COG(1e-10)
#term = CRT()
term = VTR(0.)
time1 = time.time() # Is this an ok way of timing?
solver.Solve(costfunc, term, strategy=Rand1Exp, \
CrossProbability=0.3, ScalingFactor=1.0)
sol = solver.Solution()
time_elapsed = time.time() - time1
fx = solver.bestEnergy
print "Solution: ", sol
print "f value: ", fx
print "Iterations: ", solver.generations
print "Function evaluations: ", len(esow.x)
print "Time elapsed: ", time_elapsed, " seconds"
assert almostEqual(fx, 0.0, tol=3e-3)
# DifferentialEvolutionSolver2
print "\nUsing DifferentialEvolutionSolver2:"
npop = 50
random_seed(seed)
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import ChangeOverGeneration as COG
from mystic.termination import CandidateRelativeTolerance as CRT
from mystic.termination import VTR
from mystic.strategy import Rand1Bin, Best1Bin, Rand1Exp
esow = Monitor()
ssow = Monitor()
solver = DifferentialEvolutionSolver2(ndim, npop)
solver.SetRandomInitialPoints(lb, ub)
solver.SetStrictRanges(lb, ub)
solver.SetEvaluationLimits(generations=maxiter)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(ssow)
#term = COG(1e-10)
#term = CRT()
term = VTR(0.)
time1 = time.time() # Is this an ok way of timing?
solver.Solve(costfunc, term, strategy=Rand1Exp, \
CrossProbability=0.3, ScalingFactor=1.0)
sol = solver.Solution()
time_elapsed = time.time() - time1
fx = solver.bestEnergy
print "Solution: ", sol
print "f value: ", fx
print "Iterations: ", solver.generations
print "Function evaluations: ", len(esow.x)
print "Time elapsed: ", time_elapsed, " seconds"
assert almostEqual(fx, 0.0, tol=3e-3)
Example 12: random_seed
pylab.plot(x,y,style)
pylab.legend(["Exact","Fitted"])
pylab.axis([-1.4,1.4,-2,8],'k-')
pylab.draw()
return
if __name__ == '__main__':
print "Differential Evolution"
print "======================"
# set range for random initial guess
ndim = 9
x0 = [(-100,100)]*ndim
random_seed(321)
# draw frame and exact coefficients
plot_exact()
# use DE to solve 8th-order Chebyshev coefficients
npop = 10*ndim
solution = diffev(chebyshev8cost,x0,npop)
# use pretty print for polynomials
print(poly1d(solution))
# compare solution with actual 8th-order Chebyshev coefficients
print "\nActual Coefficients:\n %s\n" % poly1d(chebyshev8coeffs)
# plot solution versus exact coefficients
Example 13: _run_solver
def _run_solver(self, early_terminate=False, **kwds):
from mystic.monitors import Monitor
import numpy
from mystic.tools import random_seed
random_seed(321)
esow = Monitor()
ssow = Monitor()
solver = self.solver
solver.SetRandomInitialPoints(min = self.min, max = self.max)
if self.usebounds: solver.SetStrictRanges(self.min, self.max)
if self.uselimits: solver.SetEvaluationLimits(self.maxiter, self.maxfun)
if self.useevalmon: solver.SetEvaluationMonitor(esow)
if self.usestepmon: solver.SetGenerationMonitor(ssow)
solver.Solve(self.costfunction, self.term, **kwds)
sol = solver.Solution()
iter=1
#if self.uselimits and self.maxiter == 0: iter=0
# sanity check solver internals
self.assertTrue(solver.generations == len(solver._stepmon.y)-iter)
self.assertTrue(list(solver.bestSolution) == solver._stepmon.x[-1]) #XXX
self.assertTrue(solver.bestEnergy == solver._stepmon.y[-1])
self.assertTrue(solver.solution_history == solver._stepmon.x)
self.assertTrue(solver.energy_history == solver._stepmon.y)
if self.usestepmon:
self.assertTrue(ssow.x == solver._stepmon.x)
self.assertTrue(ssow.y == solver._stepmon.y)
if self.useevalmon:
self.assertTrue(solver.evaluations == len(solver._evalmon.y))
self.assertTrue(esow.x == solver._evalmon.x)
self.assertTrue(esow.y == solver._evalmon.y)
# Fail appropriately for solver/termination mismatch
if early_terminate:
self.assertTrue(solver.generations < 2)
return
g = solver.generations
calls = [(g+1)*self.NP, (2*g)+1]
iters = [g]
# Test early terminations
if self.uselimits and self.maxfun == 0:
calls += [1, 20] #XXX: scipy*
iters += [1] #XXX: scipy*
self.assertTrue(solver.evaluations in calls)
self.assertTrue(solver.generations in iters)
return
if self.uselimits and self.maxfun == 1:
calls += [1, 20] #XXX: scipy*
iters += [1] #XXX: scipy*
self.assertTrue(solver.evaluations in calls)
self.assertTrue(solver.generations in iters)
return
if self.uselimits and self.maxiter == 0:
calls += [1, 20] #XXX: scipy*
iters += [1] #XXX: scipy*
self.assertTrue(solver.evaluations in calls)
self.assertTrue(solver.generations in iters)
return
if self.uselimits and self.maxiter == 1:
calls += [20] #Powell's
self.assertTrue(solver.evaluations in calls)
self.assertTrue(solver.generations in iters)
return
if self.uselimits and self.maxiter and 2 <= self.maxiter <= 5:
calls += [52, 79, 107, 141] #Powell's
self.assertTrue(solver.evaluations in calls)
self.assertTrue(solver.generations in iters)
return
# Verify solution is close to exact
print(sol)
for i in range(len(sol)):
self.assertAlmostEqual(sol[i], self.exact[i], self.precision)
return
Example 14: _run_solver
def _run_solver(self, early_terminate=False, **kwds):
from mystic.monitors import Monitor
import numpy
from mystic.tools import random_seed
seed = 111 if self.maxiter is None else 321 #XXX: good numbers...
random_seed(seed)
esow = Monitor()
ssow = Monitor()
solver = self.solver
solver.SetRandomInitialPoints(min = self.min, max = self.max)
if self.usebounds: solver.SetStrictRanges(self.min, self.max)
if self.uselimits: solver.SetEvaluationLimits(self.maxiter, self.maxfun)
if self.useevalmon: solver.SetEvaluationMonitor(esow)
if self.usestepmon: solver.SetGenerationMonitor(ssow)
#### run solver, but trap output
_stdout = trap_stdout()
solver.Solve(self.costfunction, self.term, **kwds)
out = release_stdout(_stdout)
################################
sol = solver.Solution()
iter=1
#if self.uselimits and self.maxiter == 0: iter=0
# sanity check solver internals
self.assertTrue(solver.generations == len(solver._stepmon._y)-iter)
self.assertTrue(list(solver.bestSolution) == solver._stepmon.x[-1]) #XXX
self.assertTrue(solver.bestEnergy == solver._stepmon.y[-1])
self.assertTrue(solver.solution_history == solver._stepmon.x)
self.assertTrue(solver.energy_history == solver._stepmon.y)
if self.usestepmon:
self.assertTrue(ssow.x == solver._stepmon.x)
self.assertTrue(ssow.y == solver._stepmon.y)
self.assertTrue(ssow._y == solver._stepmon._y)
if self.useevalmon:
self.assertTrue(solver.evaluations == len(solver._evalmon._y))
self.assertTrue(esow.x == solver._evalmon.x)
self.assertTrue(esow.y == solver._evalmon.y)
self.assertTrue(esow._y == solver._evalmon._y)
# Fail appropriately for solver/termination mismatch
if early_terminate:
self.assertTrue(solver.generations < 2)
warn = "Warning: Invalid termination condition (nPop < 2)"
self.assertTrue(warn in out)
return
g = solver.generations
calls = [(g+1)*self.NP, (2*g)+1]
iters = [g]
# Test early terminations
if self.uselimits and self.maxfun == 0:
calls += [1, 20] #XXX: scipy*
iters += [1] #XXX: scipy*
self.assertTrue(solver.evaluations in calls)
self.assertTrue(solver.generations in iters)
return
if self.uselimits and self.maxfun == 1:
calls += [1, 20] #XXX: scipy*
iters += [1] #XXX: scipy*
self.assertTrue(solver.evaluations in calls)
self.assertTrue(solver.generations in iters)
return
if self.uselimits and self.maxiter == 0:
calls += [1, 20] #XXX: scipy*
iters += [1] #XXX: scipy*
self.assertTrue(solver.evaluations in calls)
self.assertTrue(solver.generations in iters)
return
if self.uselimits and self.maxiter == 1:
calls += [20] #Powell's
self.assertTrue(solver.evaluations in calls)
self.assertTrue(solver.generations in iters)
return
if self.uselimits and self.maxiter and 2 <= self.maxiter <= 5:
calls += [52, 79, 107, 141] #Powell's
self.assertTrue(solver.evaluations in calls)
self.assertTrue(solver.generations in iters)
return
# Verify solution is close to exact
#print(sol)
for i in range(len(sol)):
self.assertAlmostEqual(sol[i], self.exact[i], self.precision)
return