This article collects typical C++ usage examples of the population::set_v method. If you have been wondering what population::set_v does, how to call it, or what real code using it looks like, the curated examples below should help; you can also read further into the population class the method belongs to.
Four code examples of population::set_v are shown below; all of them come from evolve() implementations of PaGMO optimisation algorithms.
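All four examples share the same pattern: after an inner optimisation produces a new decision vector, the velocity is computed as the displacement from the individual's current position and written back together with the new position. The helper below is only a sketch of that pattern and is not part of PaGMO; the function name and the header path are assumptions, and the displacement is taken before set_x because set_x overwrites cur_x (the original comments note that a new evaluation is possible there).

#include <algorithm>
#include <functional>

#include <pagmo/population.h> // header path assumed for a typical PaGMO 1.x install

// Hypothetical helper, not part of PaGMO: move individual `idx` to `newx` and store
// the implied velocity v = newx - cur_x. The displacement is computed before set_x,
// since set_x overwrites cur_x (and may trigger a new objective evaluation).
void set_point_and_velocity(pagmo::population &pop,
                            pagmo::population::size_type idx,
                            const pagmo::decision_vector &newx)
{
    pagmo::decision_vector v(newx.size());
    const pagmo::decision_vector &oldx = pop.get_individual(idx).cur_x;
    std::transform(newx.begin(), newx.end(), oldx.begin(), v.begin(), std::minus<double>());
    pop.set_x(idx, newx);
    pop.set_v(idx, v);
}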
Example 1: evolve
void ms::evolve(population &pop) const
{
// Let's store some useful variables.
const population::size_type NP = pop.size();
// Get out if there is nothing to do.
if (m_starts == 0 || NP == 0) {
return;
}
// Local population used in the algorithm iterations.
population working_pop(pop);
// Main multi-start loop: one restart of the inner algorithm per iteration
for (int i = 0; i < m_starts; ++i)
{
working_pop.reinit();
m_algorithm->evolve(working_pop);
if (working_pop.problem().compare_fc(
        working_pop.get_individual(working_pop.get_best_idx()).cur_f,
        working_pop.get_individual(working_pop.get_best_idx()).cur_c,
        pop.get_individual(pop.get_worst_idx()).cur_f,
        pop.get_individual(pop.get_worst_idx()).cur_c))
{
//update the best population by replacing its worst individual with the good one just produced.
pop.set_x(pop.get_worst_idx(),working_pop.get_individual(working_pop.get_best_idx()).cur_x);
pop.set_v(pop.get_worst_idx(),working_pop.get_individual(working_pop.get_best_idx()).cur_v);
}
if (m_screen_output)
{
std::cout << i << ". " << "\tCurrent iteration best: " << working_pop.get_individual(working_pop.get_best_idx()).cur_f << "\tOverall champion: " << pop.champion().f << std::endl;
}
}
}
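A note on the design above: the incumbent population pop is only touched when a restart produces something better, in which case its worst member is overwritten; compare_fc compares fitness together with constraint information. Below is a sketch of that replacement step in isolation; the helper name is made up, but every call in it appears in the example above.

// Hypothetical helper (not part of PaGMO): copy the best individual of `challenger`
// over the worst individual of `pop` when it compares favourably on fitness and
// constraints. Returns true if a replacement took place.
bool replace_worst_if_better(pagmo::population &pop, const pagmo::population &challenger)
{
    const pagmo::population::size_type worst = pop.get_worst_idx();
    const pagmo::population::size_type best = challenger.get_best_idx();
    if (pop.problem().compare_fc(challenger.get_individual(best).cur_f,
                                 challenger.get_individual(best).cur_c,
                                 pop.get_individual(worst).cur_f,
                                 pop.get_individual(worst).cur_c)) {
        pop.set_x(worst, challenger.get_individual(best).cur_x);
        pop.set_v(worst, challenger.get_individual(best).cur_v);
        return true;
    }
    return false;
}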
Example 2: evolve
//......... part of the code omitted here .........
if ( prob_f_dimension != 1 ) {
pagmo_throw(value_error,"The problem is not single objective and sa_corana is not suitable to solve it");
}
//Determines the number of temperature adjustments for the annealing procedure
const size_t n_T = m_niter / (m_step_adj * m_bin_size * Dc);
// Get out if there is nothing to do.
if (NP == 0 || m_niter == 0) {
return;
}
if (n_T == 0) {
pagmo_throw(value_error,"n_T is zero, increase niter");
}
//Starting point is the best individual
const int bestidx = pop.get_best_idx();
const decision_vector &x0 = pop.get_individual(bestidx).cur_x;
const fitness_vector &fit0 = pop.get_individual(bestidx).cur_f;
//Determines the coefficient used to decrease the temperature
const double Tcoeff = std::pow(m_Tf/m_Ts,1.0/(double)(n_T));
//Stores the current and new points
decision_vector xNEW = x0, xOLD = xNEW;
fitness_vector fNEW = fit0, fOLD = fNEW;
//Stores the adaptive steps of each component (integer part included but not used)
decision_vector step(D,m_range);
//Stores the number of accepted points per component (integer part included but not used)
std::vector<int> acp(D,0);
double ratio = 0, currentT = m_Ts, probab = 0;
//Main SA loops
for (size_t jter = 0; jter < n_T; ++jter) {
for (int mter = 0; mter < m_step_adj; ++mter) {
for (int kter = 0; kter < m_bin_size; ++kter) {
size_t nter = boost::uniform_int<int>(0,Dc-1)(m_urng);
for (size_t numb = 0; numb < Dc ; ++numb) {
nter = (nter + 1) % Dc;
//We modify the current point by perturbing its nter-th component within
//a step that we will later adapt
xNEW[nter] = xOLD[nter] + boost::uniform_real<double>(-1,1)(m_drng) * step[nter] * (ub[nter]-lb[nter]);
// If new solution produced is infeasible ignore it
if ((xNEW[nter] > ub[nter]) || (xNEW[nter] < lb[nter])) {
xNEW[nter]=xOLD[nter];
continue;
}
//And we evaluate the objective function at the new point
prob.objfun(fNEW,xNEW);
// We decide whether to accept or discard the point
if (prob.compare_fitness(fNEW,fOLD) ) {
//accept
xOLD[nter] = xNEW[nter];
fOLD = fNEW;
acp[nter]++; //Increase the number of accepted values
} else {
//otherwise, use the Boltzmann criterion to decide the acceptance
probab = exp ( - fabs(fOLD[0] - fNEW[0] ) / currentT );
// we compare probab with a uniform random number in [0,1)
if (probab > m_drng()) {
xOLD[nter] = xNEW[nter];
fOLD = fNEW;
acp[nter]++; //Increase the number of accepted values
} else {
xNEW[nter] = xOLD[nter];
}
} // end if
} // end for(nter = 0; ...
} // end for(kter = 0; ...
// adjust the step (adaptively)
for (size_t iter = 0; iter < Dc; ++iter) {
ratio = (double)acp[iter]/(double)m_bin_size;
acp[iter] = 0; //reset the counter
if (ratio > .6) {
	//too many acceptances: increase the step, by a factor of 3 at most
	step[iter] = step[iter] * (1 + 2 * (ratio - .6) / .4);
} else if (ratio < .4) {
	//too few acceptances: decrease the step, by a factor of 3 at most
	step[iter] = step[iter] / (1 + 2 * ((.4 - ratio) / .4));
}
//And if it becomes too large, reset it to its initial value
if (step[iter] > m_range) {
	step[iter] = m_range;
}
}
}
// Cooling schedule
currentT *= Tcoeff;
}
if ( prob.compare_fitness(fOLD,fit0) ){
pop.set_x(bestidx,xOLD); //new evaluation is possible here......
std::transform(xOLD.begin(), xOLD.end(), pop.get_individual(bestidx).cur_x.begin(), xOLD.begin(),std::minus<double>());
pop.set_v(bestidx,xOLD);
}
}
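The step-adaptation block near the end of the inner loops is the heart of Corana's simulated annealing: for each component, the fraction of accepted moves over a bin of trials drives the step width, widening it (by a factor of 3 at most) when more than 60% of moves are accepted, narrowing it (again by a factor of 3 at most) when fewer than 40% are, and never letting it exceed the initial range. A self-contained sketch of just that rule, using only the standard library (the function name and signature are illustrative):

#include <cstddef>
#include <vector>

// Corana step-size adaptation as used above; `acp[i]` counts acceptances of
// component i over `bin_size` trials, `range` is the initial (and maximum) step.
void adapt_steps(std::vector<double> &step, std::vector<int> &acp,
                 int bin_size, double range)
{
    for (std::size_t i = 0; i < step.size(); ++i) {
        const double ratio = static_cast<double>(acp[i]) / static_cast<double>(bin_size);
        acp[i] = 0; // reset the acceptance counter for the next bin
        if (ratio > 0.6) {
            step[i] *= 1.0 + 2.0 * (ratio - 0.6) / 0.4;   // too many acceptances: widen
        } else if (ratio < 0.4) {
            step[i] /= 1.0 + 2.0 * (0.4 - ratio) / 0.4;   // too few acceptances: narrow
        }
        if (step[i] > range) {
            step[i] = range; // cap at the initial range
        }
    }
}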
Example 3: evolve
//......... part of the code omitted here .........
// Load the data for SnoptProblem ...
SnoptProblem.setProblemSize( n, neF );
SnoptProblem.setNeG( lenG );
SnoptProblem.setNeA( lenA );
SnoptProblem.setA ( lenA, iAfun, jAvar, A );
SnoptProblem.setG ( lenG, iGfun, jGvar );
SnoptProblem.setObjective ( ObjRow, ObjAdd );
SnoptProblem.setX ( x, xlow, xupp, xmul, xstate );
SnoptProblem.setF ( F, Flow, Fupp, Fmul, Fstate );
SnoptProblem.setXNames ( xnames, nxnames );
SnoptProblem.setFNames ( Fnames, nFnames );
SnoptProblem.setProbName ( name.c_str() ); //Note: the problem name is limited to 8 characters
SnoptProblem.setUserFun ( snopt_function_ );
//We set some parameters
if (m_screen_output) SnoptProblem.setIntParameter("Summary file",6);
if (m_file_out) SnoptProblem.setPrintFile ( name.c_str() );
SnoptProblem.setIntParameter ( "Derivative option", 0 );
SnoptProblem.setIntParameter ( "Major iterations limit", m_major);
SnoptProblem.setIntParameter ( "Iterations limit",100000);
SnoptProblem.setRealParameter( "Major feasibility tolerance", m_feas);
SnoptProblem.setRealParameter( "Major optimality tolerance", m_opt);
//We set the sparsity structure
int neG;
try
{
std::vector<int> iGfun_vect, jGvar_vect;
prob.set_sparsity(neG,iGfun_vect,jGvar_vect);
for (int i=0; i < neG; i++)
{
iGfun[i] = iGfun_vect[i];
jGvar[i] = jGvar_vect[i];
}
SnoptProblem.setNeG( neG );
SnoptProblem.setNeA( 0 );
SnoptProblem.setG( lenG, iGfun, jGvar );
} //the user did implement the sparsity in the problem
catch (const not_implemented_error &)
{
SnoptProblem.computeJac();
neG = SnoptProblem.getNeG();
} //the user did not implement the sparsity in the problem
if (m_screen_output)
{
std::cout << "PaGMO 4 SNOPT:" << std::endl << std::endl;
std::cout << "Sparsity pattern set, NeG = " << neG << std::endl;
std::cout << "iGfun: [";
for (int i=0; i<neG-1; ++i) std::cout << iGfun[i] << ",";
std::cout << iGfun[neG-1] << "]" << std::endl;
std::cout << "jGvar: [";
for (int i=0; i<neG-1; ++i) std::cout << jGvar[i] << ",";
std::cout << jGvar[neG-1] << "]" << std::endl;
}
integer Cold = 0;
//Here we call the snoptA routine
SnoptProblem.solve( Cold );
//Save the final point, making sure it is within the problem's box bounds
std::copy(x,x+n,di_comodo.x.begin());
decision_vector newx = di_comodo.x;
std::transform(di_comodo.x.begin(), di_comodo.x.end(), pop.get_individual(bestidx).cur_x.begin(), di_comodo.x.begin(),std::minus<double>());
for (integer i=0; i<n; i++)
{
newx[i] = std::min(std::max(lb[i],newx[i]),ub[i]);
}
pop.set_x(bestidx,newx);
pop.set_v(bestidx,di_comodo.x);
//Clean up memory allocated to call the snoptA routine
delete []iAfun;
delete []jAvar;
delete []A;
delete []iGfun;
delete []jGvar;
delete []x;
delete []xlow;
delete []xupp;
delete []xmul;
delete []xstate;
delete []F;
delete []Flow;
delete []Fupp;
delete []Fmul;
delete []Fstate;
delete []xnames;
delete []Fnames;
}
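The write-back at the end of this example deserves a comment: the raw SNOPT solution is used, unclamped, to compute the velocity (its displacement from the previous cur_x), while the copy handed to set_x is first clamped into the problem's box bounds. A minimal clamping helper, standard library only (the name is made up):

#include <algorithm>
#include <cstddef>
#include <vector>

// Hypothetical helper: clamp every component of x into [lb[i], ub[i]].
void clamp_to_box(std::vector<double> &x,
                  const std::vector<double> &lb, const std::vector<double> &ub)
{
    for (std::size_t i = 0; i < x.size(); ++i) {
        x[i] = std::min(std::max(lb[i], x[i]), ub[i]);
    }
}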
Example 4: evolve
void cs::evolve(population &pop) const
{
// Let's store some useful variables.
const problem::base &prob = pop.problem();
const problem::base::size_type D = prob.get_dimension(), prob_i_dimension = prob.get_i_dimension(), prob_c_dimension = prob.get_c_dimension(), prob_f_dimension = prob.get_f_dimension();
const decision_vector &lb = prob.get_lb(), &ub = prob.get_ub();
const population::size_type NP = pop.size();
const problem::base::size_type Dc = D - prob_i_dimension;
//We perform some checks to determine whether the problem/population are suitable for compass search
if ( Dc == 0 ) {
pagmo_throw(value_error,"There is no continuous part in the problem decision vector for compass search to optimise");
}
if ( prob_c_dimension != 0 ) {
pagmo_throw(value_error,"The problem is not box constrained and compass search is not suitable to solve it");
}
if ( prob_f_dimension != 1 ) {
pagmo_throw(value_error,"The problem is not single objective and compass search is not suitable to solve it");
}
// Get out if there is nothing to do.
if (NP == 0 || m_max_eval == 0) {
return;
}
//Starting point is the best individual
const int bestidx = pop.get_best_idx();
const decision_vector &x0 = pop.get_individual(bestidx).cur_x;
const fitness_vector &fit0 = pop.get_individual(bestidx).cur_f;
decision_vector x=x0,newx;
fitness_vector f=fit0,newf=fit0;
bool flag = false;
int eval=0;
double newrange=m_start_range;
while (newrange > m_stop_range && eval <= m_max_eval) {
flag = false;
for (unsigned int i=0; i<Dc; i++) {
newx=x;
//move up
newx[i] = x[i] + newrange * (ub[i]-lb[i]);
//feasibility correction
if (newx[i] > ub [i]) newx[i]=ub[i];
prob.objfun(newf,newx); eval++;
if (prob.compare_fitness(newf,f)) {
f = newf;
x = newx;
flag=true;
break; //accept
}
//move down
newx[i] = x[i] - newrange * (ub[i]-lb[i]);
//feasibility correction
if (newx[i] < lb [i]) newx[i]=lb[i];
prob.objfun(newf,newx); eval++;
if (prob.compare_fitness(newf,f)) { //accept
f = newf;
x = newx;
flag=true;
break;
}
}
if (!flag) {
newrange *= m_reduction_coeff;
}
} //end while
std::transform(x.begin(), x.end(), pop.get_individual(bestidx).cur_x.begin(), newx.begin(),std::minus<double>()); // newx is now velocity
pop.set_x(bestidx,x); //new evaluation is possible here......
pop.set_v(bestidx,newx);
}
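To see the control flow of compass search without the PaGMO plumbing, here is a compact sketch of the same idea on a plain std::function objective: poll each coordinate up and then down by a fraction of the box width, accept the first improving move, and shrink the poll range whenever a full sweep fails. All names are illustrative and this is not PaGMO code.

#include <algorithm>
#include <cstddef>
#include <functional>
#include <initializer_list>
#include <vector>

// Illustrative pattern-search sketch mirroring cs::evolve above (not PaGMO code).
std::vector<double> compass_search(const std::function<double(const std::vector<double>&)> &f,
                                   std::vector<double> x,
                                   const std::vector<double> &lb, const std::vector<double> &ub,
                                   double range, double stop_range,
                                   double reduction, int max_eval)
{
    double fx = f(x);
    int eval = 1;
    while (range > stop_range && eval <= max_eval) {
        bool improved = false;
        for (std::size_t i = 0; i < x.size() && !improved; ++i) {
            for (double sign : {+1.0, -1.0}) {
                std::vector<double> cand = x;
                cand[i] += sign * range * (ub[i] - lb[i]);
                cand[i] = std::min(std::max(lb[i], cand[i]), ub[i]); // stay inside the box
                const double fc = f(cand);
                ++eval;
                if (fc < fx) { // accept the first improving move and restart the sweep
                    x = cand;
                    fx = fc;
                    improved = true;
                    break;
                }
            }
        }
        if (!improved) {
            range *= reduction; // no improvement in the whole sweep: shrink the poll range
        }
    }
    return x;
}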