This article collects typical usage examples of the C++ method population::set_x. If you have been wondering how exactly population::set_x is used, what it is for, or would like to see concrete examples of it, the hand-picked code samples below may help. You can also explore further usage examples of the containing class, population.
A total of 11 code examples of population::set_x are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ examples.
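Before the individual examples, here is a minimal sketch of the idiom most of them share: build a candidate decision vector, evaluate it, and overwrite the worst individual via population::set_x if the candidate compares favourably. It is written against the PaGMO 1.x API used throughout this page; the helper name and the include path are illustrative assumptions, not part of PaGMO.

#include "pagmo/pagmo.h" // assumed umbrella header for a PaGMO 1.x install; adjust to your setup

// Hypothetical helper: overwrite the worst individual with x if x compares
// favourably on fitness and constraint violation (compare_fc handles both).
void replace_worst_if_better(pagmo::population &pop, const pagmo::decision_vector &x)
{
	using namespace pagmo;
	const problem::base &prob = pop.problem();
	// Evaluate fitness and constraints of the candidate point.
	fitness_vector f(prob.get_f_dimension());
	constraint_vector c(prob.get_c_dimension());
	prob.objfun(f, x);
	prob.compute_constraints(c, x);
	// Locate the worst individual and replace it if the candidate is better.
	const population::size_type worst = pop.get_worst_idx();
	if (prob.compare_fc(f, c, pop.get_individual(worst).cur_f, pop.get_individual(worst).cur_c)) {
		pop.set_x(worst, x); // stores the new chromosome and refreshes the cached fitness/constraints
	}
}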
Example 1: evolve
void ms::evolve(population &pop) const
{
// Let's store some useful variables.
const population::size_type NP = pop.size();
// Get out if there is nothing to do.
if (m_starts == 0 || NP == 0) {
return;
}
// Local population used in the algorithm iterations.
population working_pop(pop);
//ms main loop
for (int i=0; i< m_starts; ++i)
{
working_pop.reinit();
m_algorithm->evolve(working_pop);
if (working_pop.problem().compare_fc(working_pop.get_individual(working_pop.get_best_idx()).cur_f,working_pop.get_individual(working_pop.get_best_idx()).cur_c,
pop.get_individual(pop.get_worst_idx()).cur_f,pop.get_individual(pop.get_worst_idx()).cur_c
) )
{
//update best population replacing its worst individual with the good one just produced.
pop.set_x(pop.get_worst_idx(),working_pop.get_individual(working_pop.get_best_idx()).cur_x);
pop.set_v(pop.get_worst_idx(),working_pop.get_individual(working_pop.get_best_idx()).cur_v);
}
if (m_screen_output)
{
std::cout << i << ". " << "\tCurrent iteration best: " << working_pop.get_individual(working_pop.get_best_idx()).cur_f << "\tOverall champion: " << pop.champion().f << std::endl;
}
}
}
Example 2: evolve
/// Evolve method.
void monte_carlo::evolve(population &pop) const
{
// Let's store some useful variables.
const problem::base &prob = pop.problem();
const problem::base::size_type prob_dimension = prob.get_dimension(), prob_i_dimension = prob.get_i_dimension();
const decision_vector &lb = prob.get_lb(), &ub = prob.get_ub();
const population::size_type pop_size = pop.size();
// Get out if there is nothing to do.
if (pop_size == 0 || m_max_eval == 0) {
return;
}
// Initialise temporary decision vector, fitness vector and constraint vector.
decision_vector tmp_x(prob_dimension);
fitness_vector tmp_f(prob.get_f_dimension());
constraint_vector tmp_c(prob.get_c_dimension());
// Main loop.
for (std::size_t i = 0; i < m_max_eval; ++i) {
// Generate a random decision vector.
for (problem::base::size_type j = 0; j < prob_dimension - prob_i_dimension; ++j) {
tmp_x[j] = boost::uniform_real<double>(lb[j],ub[j])(m_drng);
}
for (problem::base::size_type j = prob_dimension - prob_i_dimension; j < prob_dimension; ++j) {
tmp_x[j] = boost::uniform_int<int>(lb[j],ub[j])(m_urng);
}
// Compute fitness and constraints.
prob.objfun(tmp_f,tmp_x);
prob.compute_constraints(tmp_c,tmp_x);
// Locate the worst individual.
const population::size_type worst_idx = pop.get_worst_idx();
if (prob.compare_fc(tmp_f,tmp_c,pop.get_individual(worst_idx).cur_f,pop.get_individual(worst_idx).cur_c)) {
pop.set_x(worst_idx,tmp_x);
}
}
}
Example 3: evolve
//... some code omitted here ...
if ( prob_f_dimension != 1 ) {
pagmo_throw(value_error,"The problem is not single objective and sa_corana is not suitable to solve it");
}
//Determines the number of temperature adjustments for the annealing procedure
const size_t n_T = m_niter / (m_step_adj * m_bin_size * Dc);
// Get out if there is nothing to do.
if (NP == 0 || m_niter == 0) {
return;
}
if (n_T == 0) {
pagmo_throw(value_error,"n_T is zero, increase niter");
}
//Starting point is the best individual
const int bestidx = pop.get_best_idx();
const decision_vector &x0 = pop.get_individual(bestidx).cur_x;
const fitness_vector &fit0 = pop.get_individual(bestidx).cur_f;
//Determines the coefficient to decrease the temperature
const double Tcoeff = std::pow(m_Tf/m_Ts,1.0/(double)(n_T));
//Stores the current and new points
decision_vector xNEW = x0, xOLD = xNEW;
fitness_vector fNEW = fit0, fOLD = fNEW;
//Stores the adaptive steps of each component (integer part included but not used)
decision_vector step(D,m_range);
//Stores the number of accepted points per component (integer part included but not used)
std::vector<int> acp(D,0) ;
double ratio = 0, currentT = m_Ts, probab = 0;
//Main SA loops
for (size_t jter = 0; jter < n_T; ++jter) {
for (int mter = 0; mter < m_step_adj; ++mter) {
for (int kter = 0; kter < m_bin_size; ++kter) {
size_t nter = boost::uniform_int<int>(0,Dc-1)(m_urng);
for (size_t numb = 0; numb < Dc ; ++numb) {
nter = (nter + 1) % Dc;
//We modify the current point xOLD by mutating its nter-th component within
//a step size that we will later adapt
xNEW[nter] = xOLD[nter] + boost::uniform_real<double>(-1,1)(m_drng) * step[nter] * (ub[nter]-lb[nter]);
// If new solution produced is infeasible ignore it
if ((xNEW[nter] > ub[nter]) || (xNEW[nter] < lb[nter])) {
xNEW[nter]=xOLD[nter];
continue;
}
//And we evaluate the objective function for the new point
prob.objfun(fNEW,xNEW);
// We decide whether to accept or discard the point
if (prob.compare_fitness(fNEW,fOLD) ) {
//accept
xOLD[nter] = xNEW[nter];
fOLD = fNEW;
acp[nter]++; //Increase the number of accepted values
} else {
//test it with Boltzmann to decide the acceptance
probab = exp ( - fabs(fOLD[0] - fNEW[0] ) / currentT );
// we compare probab with a uniformly drawn random number.
if (probab > m_drng()) {
xOLD[nter] = xNEW[nter];
fOLD = fNEW;
acp[nter]++; //Increase the number of accepted values
} else {
xNEW[nter] = xOLD[nter];
}
} // end if
} // end for(nter = 0; ...
} // end for(kter = 0; ...
// adjust the step (adaptively)
for (size_t iter = 0; iter < Dc; ++iter) {
ratio = (double)acp[iter]/(double)m_bin_size;
acp[iter] = 0; //reset the counter
if (ratio > .6) {
//too many acceptances, increase the step by a factor 3 maximum
step[iter] = step [iter] * (1 + 2 *(ratio - .6)/.4);
} else {
if (ratio < .4) {
//too few acceptances, decrease the step by a factor 3 maximum
step [iter]= step [iter] / (1 + 2 * ((.4 - ratio)/.4));
};
};
//And if it becomes too large, reset it to its initial value
if ( step[iter] > m_range ) {
step [iter] = m_range;
};
}
}
// Cooling schedule
currentT *= Tcoeff;
}
if ( prob.compare_fitness(fOLD,fit0) ){
pop.set_x(bestidx,xOLD); //new evaluation is possible here......
std::transform(xOLD.begin(), xOLD.end(), pop.get_individual(bestidx).cur_x.begin(), xOLD.begin(),std::minus<double>());
pop.set_v(bestidx,xOLD);
}
}
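A pattern worth noting here is the final write-back: the improved point replaces the best individual via set_x, and the displacement with respect to the previous chromosome is stored as that individual's velocity via set_v (Examples 8 and 10 below compute the displacement before calling set_x). A stripped-down sketch of just that step, with a helper name of my own choosing, not PaGMO's:

#include <algorithm>
#include <functional>
#include "pagmo/pagmo.h" // assumed PaGMO 1.x umbrella header

// Illustrative helper (not part of PaGMO): write x_new into individual idx and
// set its velocity to the displacement x_new - cur_x, as the local-search
// examples on this page do for the best individual.
void write_back_with_velocity(pagmo::population &pop,
                              pagmo::population::size_type idx,
                              const pagmo::decision_vector &x_new)
{
	using namespace pagmo;
	// v = x_new - cur_x, component by component (computed before set_x changes cur_x).
	decision_vector v(x_new.size());
	std::transform(x_new.begin(), x_new.end(),
	               pop.get_individual(idx).cur_x.begin(),
	               v.begin(), std::minus<double>());
	pop.set_x(idx, x_new); // updates the chromosome (the individual is re-evaluated)
	pop.set_v(idx, v);     // stores the displacement as the velocity
}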
Example 4: evolve
/**
* Runs the NN_TSP algorithm.
*
* @param[in,out] pop input/output pagmo::population to be evolved.
*/
void nn_tsp::evolve(population &pop) const
{
const problem::base_tsp* prob;
//check if problem is of type pagmo::problem::base_tsp
try
{
prob = &dynamic_cast<const problem::base_tsp &>(pop.problem());
}
catch (const std::bad_cast& e)
{
pagmo_throw(value_error,"Problem not of type pagmo::problem::tsp, nn_tsp can only be called on problem::tsp problems");
}
// Let's store some useful variables.
const problem::base::size_type Nv = prob->get_n_cities();
//create individuals
decision_vector best_tour(Nv);
decision_vector new_tour(Nv);
//check input parameter
if (m_start_city < -1 || m_start_city > static_cast<int>(Nv-1)) {
pagmo_throw(value_error,"invalid value for the first vertex");
}
size_t first_city, Nt;
if(m_start_city == -1){
first_city = 0;
Nt = Nv;
}
else{
first_city = m_start_city;
Nt = m_start_city+1;
}
double length_best_tour, length_new_tour; // distance() returns double, so avoid truncating the tour lengths
size_t nxt_city, min_idx;
std::vector<int> not_visited(Nv);
length_best_tour = 0;
//main loop
for (size_t i = first_city; i < Nt; i++) {
length_new_tour = 0;
for (size_t j = 0; j < Nv; j++) {
not_visited[j] = j;
}
new_tour[0] = i;
std::swap(not_visited[new_tour[0]],not_visited[Nv-1]);
for (size_t j = 1; j < Nv-1; j++) {
min_idx = 0;
nxt_city = not_visited[0];
for (size_t l = 1; l < Nv-j; l++) {
if(prob->distance(new_tour[j-1], not_visited[l]) < prob->distance(new_tour[j-1], nxt_city) )
{
min_idx = l;
nxt_city = not_visited[l];
}
}
new_tour[j] = nxt_city;
length_new_tour += prob->distance(new_tour[j-1], nxt_city);
std::swap(not_visited[min_idx],not_visited[Nv-j-1]);
}
new_tour[Nv-1] = not_visited[0];
length_new_tour += prob->distance(new_tour[Nv-2], new_tour[Nv-1]);
length_new_tour += prob->distance(new_tour[Nv-1], new_tour[0]);
if(i == first_city || length_new_tour < length_best_tour){
best_tour = new_tour;
length_best_tour = length_new_tour;
}
}
//change representation of tour
population::size_type best_idx = pop.get_best_idx();
switch( prob->get_encoding() ) {
case problem::base_tsp::FULL:
pop.set_x(best_idx,prob->cities2full(best_tour));
break;
case problem::base_tsp::RANDOMKEYS:
pop.set_x(best_idx,prob->cities2randomkeys(best_tour,pop.get_individual(best_idx).cur_x));
break;
case problem::base_tsp::CITIES:
pop.set_x(best_idx,best_tour);
break;
}
} // end of evolve
Example 5: evolve
//... some code omitted here ...
* if the gradient falls below the grad_tol parameter, if the maximum number of iterations max_iter is exceeded or if
* the inner GSL routine call reports an error (which will be logged on std::cout). After the end of the minimisation process,
* the minimised decision vector will replace the best individual in the population, after being modified to fall within
* the problem bounds if necessary.
*
* @param[in,out] pop population to evolve.
*/
void gsl_gradient::evolve(population &pop) const
{
// Do nothing if the population is empty.
if (!pop.size()) {
return;
}
// Useful variables.
const problem::base &problem = pop.problem();
if (problem.get_f_dimension() != 1) {
pagmo_throw(value_error,"this algorithm does not support multi-objective optimisation");
}
if (problem.get_c_dimension()) {
pagmo_throw(value_error,"this algorithm does not support constrained optimisation");
}
const problem::base::size_type cont_size = problem.get_dimension() - problem.get_i_dimension();
if (!cont_size) {
pagmo_throw(value_error,"the problem has no continuous part");
}
// Extract the best individual.
const population::size_type best_ind_idx = pop.get_best_idx();
const population::individual_type &best_ind = pop.get_individual(best_ind_idx);
// GSL wrapper parameters structure.
objfun_wrapper_params params;
params.p = &problem;
// Integer part of the temporary decision vector must be filled with the integer part of the best individual,
// which will not be optimised.
params.x.resize(problem.get_dimension());
std::copy(best_ind.cur_x.begin() + cont_size, best_ind.cur_x.end(), params.x.begin() + cont_size);
params.f.resize(1);
params.step_size = m_numdiff_step_size;
// GSL function structure.
gsl_multimin_function_fdf gsl_func;
gsl_func.n = boost::numeric_cast<std::size_t>(cont_size);
gsl_func.f = &objfun_wrapper;
gsl_func.df = &d_objfun_wrapper;
gsl_func.fdf = &fd_objfun_wrapper;
gsl_func.params = (void *)&params;
// Minimiser.
gsl_multimin_fdfminimizer *s = 0;
// This will be the starting point.
gsl_vector *x = 0;
// Here we start the allocations.
// Recast as size_t here, in order to avoid potential overflows later.
const std::size_t s_cont_size = boost::numeric_cast<std::size_t>(cont_size);
// Allocate and check the allocation results.
x = gsl_vector_alloc(s_cont_size);
const gsl_multimin_fdfminimizer_type *minimiser = get_gsl_minimiser_ptr();
pagmo_assert(minimiser);
s = gsl_multimin_fdfminimizer_alloc(minimiser,s_cont_size);
// Check the allocations.
check_allocs(x,s);
// Fill in the starting point (from the best individual).
for (std::size_t i = 0; i < s_cont_size; ++i) {
gsl_vector_set(x,i,best_ind.cur_x[i]);
}
// Init the solver.
gsl_multimin_fdfminimizer_set(s,&gsl_func,x,m_step_size,m_tol);
// Iterate.
std::size_t iter = 0;
int status;
try {
do
{
++iter;
status = gsl_multimin_fdfminimizer_iterate(s);
if (status) {
break;
}
status = gsl_multimin_test_gradient(s->gradient,m_grad_tol);
} while (status == GSL_CONTINUE && iter < m_max_iter);
} catch (const std::exception &e) {
// Cleanup and re-throw.
cleanup(x,s);
throw e;
} catch (...) {
// Cleanup and throw.
cleanup(x,s);
pagmo_throw(std::runtime_error,"unknown exception caught in gsl_gradient::evolve");
}
// Free up resources.
cleanup(x,s);
// Check the generated individual and change it to respect the bounds as necessary.
for (problem::base::size_type i = 0; i < cont_size; ++i) {
if (params.x[i] < problem.get_lb()[i]) {
params.x[i] = problem.get_lb()[i];
}
if (params.x[i] > problem.get_ub()[i]) {
params.x[i] = problem.get_ub()[i];
}
}
// Replace the best individual.
pop.set_x(best_ind_idx,params.x);
}
Example 6: evolve
//... some code omitted here ...
std::cout << i << ' ' << one_ind_pop.get_individual(0).cur_f << std::endl;
}
break;
}
default:
pagmo_throw(value_error,"Invalid initialization type");
}
std::vector<fitness_vector> fitness(NP, fitness_vector(1));
for(size_t i=0; i < NP; i++){
switch( prob->get_encoding() ) {
case problem::base_tsp::FULL:
fitness[i] = prob->objfun(prob->full2cities(my_pop[i]));
break;
case problem::base_tsp::RANDOMKEYS:
fitness[i] = prob->objfun(prob->cities2randomkeys(my_pop[i], pop.get_individual(i).cur_x));
break;
case problem::base_tsp::CITIES:
fitness[i] = prob->objfun(my_pop[i]);
break;
}
}
decision_vector tmp_tour(Nv);
bool stop, changed;
size_t rnd_num, i2, pos1_c1, pos1_c2, pos2_c1, pos2_c2; //pos2_c1 denotes the position of city1 in parent2
fitness_vector fitness_tmp;
//InverOver main loop
for(int iter = 0; iter < m_gen; iter++) {
for(size_t i1 = 0; i1 < NP; i1++) {
tmp_tour = my_pop[i1];
pos1_c1 = unif_Nv();
stop = false;
changed = false;
while(!stop){
if(unif_01() < m_ri) {
rnd_num = unif_Nvless1();
pos1_c2 = (rnd_num == pos1_c1? Nv-1:rnd_num);
} else {
i2 = unif_NPless1();
i2 = (i2 == i1? NP-1:i2);
pos2_c1 = std::find(my_pop[i2].begin(),my_pop[i2].end(),tmp_tour[pos1_c1])-my_pop[i2].begin();
pos2_c2 = (pos2_c1 == Nv-1? 0:pos2_c1+1);
pos1_c2 = std::find(tmp_tour.begin(),tmp_tour.end(),my_pop[i2][pos2_c2])-tmp_tour.begin();
}
stop = (abs(pos1_c1-pos1_c2)==1 || static_cast<problem::base::size_type>(abs(pos1_c1-pos1_c2))==Nv-1);
if(!stop) {
changed = true;
if(pos1_c1<pos1_c2) {
for(size_t l=0; l < (double (pos1_c2-pos1_c1-1)/2); l++) {
std::swap(tmp_tour[pos1_c1+1+l],tmp_tour[pos1_c2-l]);
}
pos1_c1 = pos1_c2;
} else {
//inverts the section from c1 to c2 (see documentation Note3)
for(size_t l=0; l < (double (pos1_c1-pos1_c2-1)/2); l++) {
std::swap(tmp_tour[pos1_c2+l],tmp_tour[pos1_c1-l-1]);
}
pos1_c1 = (pos1_c2 == 0? Nv-1:pos1_c2-1);
}
}
} //end of while loop (looping over a single individual)
if(changed) {
switch(prob->get_encoding()) {
case problem::base_tsp::FULL:
fitness_tmp = prob->objfun(prob->full2cities(tmp_tour));
break;
case problem::base_tsp::RANDOMKEYS: //using index 0 (arbitrarily) as a temporary template
fitness_tmp = prob->objfun(prob->cities2randomkeys(tmp_tour, pop.get_individual(0).cur_x));
break;
case problem::base_tsp::CITIES:
fitness_tmp = prob->objfun(tmp_tour);
break;
}
if(prob->compare_fitness(fitness_tmp,fitness[i1])) { //replace individual?
my_pop[i1] = tmp_tour;
fitness[i1][0] = fitness_tmp[0];
}
}
} // end of loop over population
} // end of loop over generations
//change representation of tour
for (size_t ii = 0; ii < NP; ii++) {
switch(prob->get_encoding()) {
case problem::base_tsp::FULL:
pop.set_x(ii,prob->cities2full(my_pop[ii]));
break;
case problem::base_tsp::RANDOMKEYS:
pop.set_x(ii,prob->cities2randomkeys(my_pop[ii],pop.get_individual(ii).cur_x));
break;
case problem::base_tsp::CITIES:
pop.set_x(ii,my_pop[ii]);
break;
}
}
} // end of evolve
Example 7: evolve
// Evolve method.
void base_nlopt::evolve(population &pop) const
{
// Useful variables.
const problem::base &problem = pop.problem();
if (problem.get_f_dimension() != 1) {
pagmo_throw(value_error,"this algorithm does not support multi-objective optimisation");
}
const problem::base::c_size_type c_size = problem.get_c_dimension();
const problem::base::c_size_type ec_size = problem.get_c_dimension() - problem.get_ic_dimension();
if (c_size && !m_constrained) {
pagmo_throw(value_error,"this algorithm does not support constraints");
}
if (ec_size && m_only_ineq) {
pagmo_throw(value_error,"this algorithm does not support equality constraints");
}
const problem::base::size_type cont_size = problem.get_dimension() - problem.get_i_dimension();
if (!cont_size) {
pagmo_throw(value_error,"the problem has no continuous part");
}
// Do nothing if the population is empty.
if (!pop.size()) {
return;
}
// Extract the best individual and set the initial point
const population::size_type best_ind_idx = pop.get_best_idx();
const population::individual_type &best_ind = pop.get_individual(best_ind_idx);
// Structure to pass data to the objective function wrapper.
nlopt_wrapper_data data_objfun;
data_objfun.prob = &problem;
data_objfun.x.resize(problem.get_dimension());
data_objfun.dx.resize(problem.get_dimension());
data_objfun.f.resize(1);
// Structure to pass data to the constraint function wrapper.
std::vector<nlopt_wrapper_data> data_constrfun(boost::numeric_cast<std::vector<nlopt_wrapper_data>::size_type>(c_size));
for (problem::base::c_size_type i = 0; i < c_size; ++i) {
data_constrfun[i].prob = &problem;
data_constrfun[i].x.resize(problem.get_dimension());
data_constrfun[i].dx.resize(problem.get_dimension());
data_constrfun[i].c.resize(problem.get_c_dimension());
data_constrfun[i].c_comp = i;
}
// Main NLopt call.
nlopt::opt opt(m_algo, problem.get_dimension());
m_opt = opt;
// Set the local optimizer for aug_lag methods; do nothing otherwise
set_local(problem.get_dimension());
m_opt.set_lower_bounds(problem.get_lb());
m_opt.set_upper_bounds(problem.get_ub());
m_opt.set_min_objective(objfun_wrapper, &data_objfun);
for (problem::base::c_size_type i =0; i<ec_size; ++i) {
m_opt.add_equality_constraint(constraints_wrapper, &data_constrfun[i], problem.get_c_tol().at(i));
}
for (problem::base::c_size_type i =ec_size; i<c_size; ++i) {
m_opt.add_inequality_constraint(constraints_wrapper, &data_constrfun[i], problem.get_c_tol().at(i));
}
m_opt.set_ftol_abs(m_ftol);
m_opt.set_xtol_abs(m_xtol);
m_opt.set_maxeval(m_max_iter);
//nlopt::result result;
double dummy;
decision_vector x0(best_ind.cur_x);
m_opt.optimize(x0, dummy);
pop.set_x(best_ind_idx,x0);
}
Example 8: evolve
//... some code omitted here ...
// Load the data for SnoptProblem ...
SnoptProblem.setProblemSize( n, neF );
SnoptProblem.setNeG( lenG );
SnoptProblem.setNeA( lenA );
SnoptProblem.setA ( lenA, iAfun, jAvar, A );
SnoptProblem.setG ( lenG, iGfun, jGvar );
SnoptProblem.setObjective ( ObjRow, ObjAdd );
SnoptProblem.setX ( x, xlow, xupp, xmul, xstate );
SnoptProblem.setF ( F, Flow, Fupp, Fmul, Fstate );
SnoptProblem.setXNames ( xnames, nxnames );
SnoptProblem.setFNames ( Fnames, nFnames );
SnoptProblem.setProbName ( name.c_str() ); //This is limited to 8 characters!
SnoptProblem.setUserFun ( snopt_function_ );
//We set some parameters
if (m_screen_output) SnoptProblem.setIntParameter("Summary file",6);
if (m_file_out) SnoptProblem.setPrintFile ( name.c_str() );
SnoptProblem.setIntParameter ( "Derivative option", 0 );
SnoptProblem.setIntParameter ( "Major iterations limit", m_major);
SnoptProblem.setIntParameter ( "Iterations limit",100000);
SnoptProblem.setRealParameter( "Major feasibility tolerance", m_feas);
SnoptProblem.setRealParameter( "Major optimality tolerance", m_opt);
//We set the sparsity structure
int neG;
try
{
std::vector<int> iGfun_vect, jGvar_vect;
prob.set_sparsity(neG,iGfun_vect,jGvar_vect);
for (int i=0; i < neG; i++)
{
iGfun[i] = iGfun_vect[i];
jGvar[i] = jGvar_vect[i];
}
SnoptProblem.setNeG( neG );
SnoptProblem.setNeA( 0 );
SnoptProblem.setG( lenG, iGfun, jGvar );
} //the user did implement the sparsity in the problem
catch (not_implemented_error)
{
SnoptProblem.computeJac();
neG = SnoptProblem.getNeG();
} //the user did not implement the sparsity in the problem
if (m_screen_output)
{
std::cout << "PaGMO 4 SNOPT:" << std::endl << std::endl;
std::cout << "Sparsity pattern set, NeG = " << neG << std::endl;
std::cout << "iGfun: [";
for (int i=0; i<neG-1; ++i) std::cout << iGfun[i] << ",";
std::cout << iGfun[neG-1] << "]" << std::endl;
std::cout << "jGvar: [";
for (int i=0; i<neG-1; ++i) std::cout << jGvar[i] << ",";
std::cout << jGvar[neG-1] << "]" << std::endl;
}
integer Cold = 0;
//HERE WE CALL snoptA routine!!!!!
SnoptProblem.solve( Cold );
//Save the final point making sure it is within the linear bounds
std::copy(x,x+n,di_comodo.x.begin());
decision_vector newx = di_comodo.x;
std::transform(di_comodo.x.begin(), di_comodo.x.end(), pop.get_individual(bestidx).cur_x.begin(), di_comodo.x.begin(),std::minus<double>());
for (integer i=0; i<n; i++)
{
newx[i] = std::min(std::max(lb[i],newx[i]),ub[i]);
}
pop.set_x(bestidx,newx);
pop.set_v(bestidx,di_comodo.x);
//Clean up memory allocated to call the snoptA routine
delete []iAfun;
delete []jAvar;
delete []A;
delete []iGfun;
delete []jGvar;
delete []x;
delete []xlow;
delete []xupp;
delete []xmul;
delete []xstate;
delete []F;
delete []Flow;
delete []Fupp;
delete []Fmul;
delete []Fstate;
delete []xnames;
delete []Fnames;
}
Example 9: evolve
void bee_colony::evolve(population &pop) const
{
// Let's store some useful variables.
const problem::base &prob = pop.problem();
const problem::base::size_type prob_i_dimension = prob.get_i_dimension(), D = prob.get_dimension(), Dc = D - prob_i_dimension, prob_c_dimension = prob.get_c_dimension();
const decision_vector &lb = prob.get_lb(), &ub = prob.get_ub();
const population::size_type NP = (int) pop.size();
//We perform some checks to determine whether the problem/population are suitable for ABC
if ( Dc == 0 ) {
pagmo_throw(value_error,"There is no continuous part in the problem decision vector for ABC to optimise");
}
if ( prob.get_f_dimension() != 1 ) {
pagmo_throw(value_error,"The problem is not single objective and ABC is not suitable to solve it");
}
if ( prob_c_dimension != 0 ) {
pagmo_throw(value_error,"The problem is not box constrained and ABC is not suitable to solve it");
}
if (NP < 2) {
pagmo_throw(value_error,"for ABC at least 2 individuals in the population are needed");
}
// Get out if there is nothing to do.
if (m_iter == 0) {
return;
}
// Some vectors used during evolution are allocated here.
fitness_vector fnew(prob.get_f_dimension());
decision_vector dummy(D,0); //used for initialisation purposes
std::vector<decision_vector > X(NP,dummy); //set of food sources
std::vector<fitness_vector> fit(NP); //food sources fitness
decision_vector temp_solution(D,0);
std::vector<int> trial(NP,0);
std::vector<double> probability(NP);
population::size_type neighbour = 0;
decision_vector::size_type param2change = 0;
std::vector<double> selectionfitness(NP), cumsum(NP), cumsumTemp(NP);
std::vector <population::size_type> selection(NP);
double r = 0;
// Copy the food sources position and their fitness
for ( population::size_type i = 0; i<NP; i++ ) {
X[i] = pop.get_individual(i).cur_x;
fit[i] = pop.get_individual(i).cur_f;
}
// Main ABC loop
for (int j = 0; j < m_iter; ++j) {
//1- Send employed bees
for (population::size_type ii = 0; ii< NP; ++ii) {
//selects a random component (only of the continuous part) of the decision vector
param2change = boost::uniform_int<decision_vector::size_type>(0,Dc-1)(m_urng);
//randomly choose a solution to be used to produce a mutant solution of solution ii
//randomly selected solution must be different from ii
do{
neighbour = boost::uniform_int<population::size_type>(0,NP-1)(m_urng);
}
while(neighbour == ii);
//copy local solution into temp_solution (the whole decision_vector, also the integer part)
for(population::size_type i=0; i<D; ++i) {
temp_solution[i] = X[ii][i];
}
//mutate temp_solution
temp_solution[param2change] = X[ii][param2change] + boost::uniform_real<double>(-1,1)(m_drng) * (X[ii][param2change] - X[neighbour][param2change]);
//if the generated parameter value is out of bounds, it is shifted onto the boundary
if (temp_solution[param2change]<lb[param2change]) {
temp_solution[param2change] = lb[param2change];
}
if (temp_solution[param2change]>ub[param2change]) {
temp_solution[param2change] = ub[param2change];
}
//Calling void prob.objfun(fitness_vector,decision_vector) is more efficient as no memory allocation occurs
//A call to fitness_vector prob.objfun(decision_vector) allocates memory for the return value.
prob.objfun(fnew,temp_solution);
//If the new solution is better than the old one replace it with the mutant one and reset its trial counter
if(prob.compare_fitness(fnew, fit[ii])) {
X[ii][param2change] = temp_solution[param2change];
pop.set_x(ii,X[ii]);
prob.objfun(fit[ii], X[ii]); //update the fitness vector
trial[ii] = 0;
}
else {
trial[ii]++; //if the solution can't be improved, increase its trial counter
}
//... some code omitted here ...
Example 10: evolve
void cs::evolve(population &pop) const
{
// Let's store some useful variables.
const problem::base &prob = pop.problem();
const problem::base::size_type D = prob.get_dimension(), prob_i_dimension = prob.get_i_dimension(), prob_c_dimension = prob.get_c_dimension(), prob_f_dimension = prob.get_f_dimension();
const decision_vector &lb = prob.get_lb(), &ub = prob.get_ub();
const population::size_type NP = pop.size();
const problem::base::size_type Dc = D - prob_i_dimension;
//We perform some checks to determine whether the problem/population are suitable for compass search
if ( Dc == 0 ) {
pagmo_throw(value_error,"There is no continuous part in the problem decision vector for compass search to optimise");
}
if ( prob_c_dimension != 0 ) {
pagmo_throw(value_error,"The problem is not box constrained and compass search is not suitable to solve it");
}
if ( prob_f_dimension != 1 ) {
pagmo_throw(value_error,"The problem is not single objective and compass search is not suitable to solve it");
}
// Get out if there is nothing to do.
if (NP == 0 || m_max_eval == 0) {
return;
}
//Starting point is the best individual
const int bestidx = pop.get_best_idx();
const decision_vector &x0 = pop.get_individual(bestidx).cur_x;
const fitness_vector &fit0 = pop.get_individual(bestidx).cur_f;
decision_vector x=x0,newx;
fitness_vector f=fit0,newf=fit0;
bool flag = false;
int eval=0;
double newrange=m_start_range;
while (newrange > m_stop_range && eval <= m_max_eval) {
flag = false;
for (unsigned int i=0; i<Dc; i++) {
newx=x;
//move up
newx[i] = x[i] + newrange * (ub[i]-lb[i]);
//feasibility correction
if (newx[i] > ub [i]) newx[i]=ub[i];
prob.objfun(newf,newx); eval++;
if (prob.compare_fitness(newf,f)) {
f = newf;
x = newx;
flag=true;
break; //accept
}
//move down
newx[i] = x[i] - newrange * (ub[i]-lb[i]);
//feasibility correction
if (newx[i] < lb [i]) newx[i]=lb[i];
prob.objfun(newf,newx); eval++;
if (prob.compare_fitness(newf,f)) { //accept
f = newf;
x = newx;
flag=true;
break;
}
}
if (!flag) {
newrange *= m_reduction_coeff;
}
} //end while
std::transform(x.begin(), x.end(), pop.get_individual(bestidx).cur_x.begin(), newx.begin(),std::minus<double>()); // newx is now velocity
pop.set_x(bestidx,x); //new evaluation is possible here......
pop.set_v(bestidx,newx);
}
Example 11: evolve
//... some code omitted here ...
default:
pagmo_throw(value_error,"Invalid initialization type");
}
//compute fitness of individuals (necessary if weight matrix is not symmetric)
std::vector<double> fitness(NP, 0);
if(!is_sym){
for(size_t i=0; i < NP; i++){
fitness[i] = weights[my_pop[i][Nv-1]][my_pop[i][0]];
for(size_t k=1; k < Nv; k++){
fitness[i] += weights[my_pop[i][k-1]][my_pop[i][k]];
}
}
}
decision_vector tmp_tour(Nv);
bool stop;
size_t rnd_num, i2, pos1_c1, pos1_c2, pos2_c1, pos2_c2; //pos2_c1 denotes the position of city1 in parent2
double fitness_change, fitness_tmp = 0;
//InverOver main loop
for(int iter = 0; iter < m_gen; iter++){
for(size_t i1 = 0; i1 < NP; i1++){
fitness_change = 0;
tmp_tour = my_pop[i1];
pos1_c1 = unif_Nv();
stop = false;
while(!stop){
if(unif_01() < m_ri){
rnd_num = unif_Nvless1();
pos1_c2 = (rnd_num == pos1_c1? Nv-1:rnd_num);
}
else{
i2 = unif_NPless1();
i2 = (i2 == i1? NP-1:i2);
pos2_c1 = std::find(my_pop[i2].begin(),my_pop[i2].end(),tmp_tour[pos1_c1])-my_pop[i2].begin();
pos2_c2 = (pos2_c1 == Nv-1? 0:pos2_c1+1);
pos1_c2 = std::find(tmp_tour.begin(),tmp_tour.end(),my_pop[i2][pos2_c2])-tmp_tour.begin();
}
stop = (abs(pos1_c1-pos1_c2)==1 || abs(pos1_c1-pos1_c2)==Nv-1);
if(!stop){
if(pos1_c1<pos1_c2){
for(size_t l=0; l < (double (pos1_c2-pos1_c1-1)/2); l++){
std::swap(tmp_tour[pos1_c1+1+l],tmp_tour[pos1_c2-l]);}
if(is_sym){
fitness_change -= weights[tmp_tour[pos1_c1]][tmp_tour[pos1_c2]] + weights[tmp_tour[pos1_c1+1]][tmp_tour[pos1_c2+1 - (pos1_c2+1 > Nv-1? Nv:0)]];
fitness_change += weights[tmp_tour[pos1_c1]][tmp_tour[pos1_c1+1]] + weights[tmp_tour[pos1_c2]][tmp_tour[pos1_c2+1 - (pos1_c2+1 > Nv-1? Nv:0)]];
}
}
else{
//inverts the section from c1 to c2 (see documentation Note3)
for(size_t l=0; l < (double (Nv-(pos1_c1-pos1_c2)-1)/2); l++){
std::swap(tmp_tour[pos1_c1+1+l - (pos1_c1+1+l>Nv-1? Nv:0)],tmp_tour[pos1_c2-l + (pos1_c2<l? Nv:0)]);}
if(is_sym){
fitness_change -= weights[tmp_tour[pos1_c1]][tmp_tour[pos1_c2]] + weights[tmp_tour[pos1_c1+1 - (pos1_c1+1 > Nv-1? Nv:0)]][tmp_tour[pos1_c2+1]];
fitness_change += weights[tmp_tour[pos1_c1]][tmp_tour[pos1_c1+1 - (pos1_c1+1 > Nv-1? Nv:0)]] + weights[tmp_tour[pos1_c2]][tmp_tour[pos1_c2+1]];
}
}
pos1_c1 = pos1_c2; //better performance than original Inver-Over (shorter tour in less time)
}
} //end of while loop (looping over a single individual)
if(!is_sym){ //compute fitness of the temporary tour
fitness_tmp = weights[tmp_tour[Nv-1]][tmp_tour[0]];
for(size_t k=1; k < Nv; k++){
fitness_tmp += weights[tmp_tour[k-1]][tmp_tour[k]];
}
fitness_change = fitness_tmp - fitness[i1];
}
if(fitness_change < 0){ //replace individual?
my_pop[i1] = tmp_tour;
if(!is_sym){
fitness[i1] = fitness_tmp;
}
}
} //end of loop over population
} //end of loop over generations
//change representation of tour
for (size_t ii = 0; ii < NP; ii++) {
switch( prob->get_encoding() ) {
case problem::tsp::FULL:
pop.set_x(ii,prob->cities2full(my_pop[ii]));
break;
case problem::tsp::RANDOMKEYS:
pop.set_x(ii,prob->cities2randomkeys(my_pop[ii],pop.get_individual(ii).cur_x));
break;
case problem::tsp::CITIES:
pop.set_x(ii,my_pop[ii]);
break;
}
}
} // end of evolve