This article collects typical usage examples of the C++ vectord class. If you are wondering what the vectord class does or how to use it, the curated examples below may help. In total, 15 code examples of the vectord class are shown, sorted by popularity by default.
Example 1: run_nlopt

double run_nlopt(nlopt::algorithm algo, eval_func fpointer,
                 vectord& Xnext, int maxf, const std::vector<double>& vd,
                 const std::vector<double>& vu, void* objPointer)
{
  double fmin = 0.0;
  size_t n = Xnext.size();
  nlopt::opt opt(algo, n);
  std::vector<double> xstd(n);

  opt.set_lower_bounds(vd);
  opt.set_upper_bounds(vu);
  opt.set_min_objective(fpointer, objPointer);
  opt.set_maxeval(maxf);

  // BOBYQA seems to be unstable if the same point is tested
  // repeatedly. NLOPT bug?
  opt.set_ftol_rel(1e-12);
  opt.set_ftol_abs(1e-12);

  std::copy(Xnext.begin(), Xnext.end(), xstd.begin());
  try
    {
      opt.optimize(xstd, fmin);
    }
  catch (nlopt::roundoff_limited& e)
    {
      FILE_LOG(logDEBUG) << "NLOPT Warning: Potential roundoff error. "
                         << "In general, this can be ignored.";
    }
  std::copy(xstd.begin(), xstd.end(), Xnext.begin());
  return fmin;
}
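As a quick illustration of how this wrapper might be called, here is a minimal sketch that refines a starting point on a simple quadratic. It assumes vectord is boost::numeric::ublas::vector<double> (as in BayesOpt) and that eval_func matches NLopt's C-style objective signature; sphere_obj and local_search_demo are hypothetical names.

#include <vector>
#include <nlopt.hpp>
#include <boost/numeric/ublas/vector.hpp>

typedef boost::numeric::ublas::vector<double> vectord;

// Hypothetical objective with NLopt's C-style signature:
// f(x) = sum_i x_i^2; the gradient is ignored (BOBYQA is derivative-free).
double sphere_obj(unsigned n, const double* x, double* /*grad*/, void* /*data*/)
{
  double s = 0.0;
  for (unsigned i = 0; i < n; ++i) s += x[i] * x[i];
  return s;
}

void local_search_demo()
{
  vectord x0(2);
  x0(0) = 0.5;  x0(1) = -0.3;                   // starting point
  std::vector<double> lb(2, -1.0), ub(2, 1.0);  // box bounds
  double fmin = run_nlopt(nlopt::LN_BOBYQA, &sphere_obj,
                          x0, 100, lb, ub, NULL);
  // x0 now holds the refined minimizer and fmin its objective value.
}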
Example 2: FILE_LOG

void KernelModel::setKernel(const vectord& thetav,
                            const vectord& stheta,
                            std::string k_name,
                            size_t dim)
{
  KernelFactory mKFactory;
  mKernel.reset(mKFactory.create(k_name, dim));

  if ((thetav.size() == 1) && (stheta.size() == 1)
      && (mKernel->nHyperParameters() != 1))
    {
      // We assume an isotropic prior, so we replicate the vectors
      // for all dimensions.
      size_t n = mKernel->nHyperParameters();
      FILE_LOG(logINFO) << "Expected " << n << " hyperparameters."
                        << " Replicating parameters and prior.";
      vectord newthetav = svectord(n, thetav(0));
      vectord newstheta = svectord(n, stheta(0));
      setKernelPrior(newthetav, newstheta);
      mKernel->setHyperParameters(newthetav);
    }
  else
    {
      setKernelPrior(thetav, stheta);
      mKernel->setHyperParameters(thetav);
    }
}
Example 3: evaluateSample

double evaluateSample(const vectord& Xi)
{
  double x[100];  // fixed-size buffer: assumes Xi.size() <= 100
  for (size_t i = 0; i < Xi.size(); ++i)
    x[i] = Xi(i);
  return testFunction(Xi.size(), x, NULL, NULL);
};
Example 4: setHyperParameters

void setHyperParameters(const vectord& theta)
{
  if (theta.size() != n_params)
    {
      throw std::invalid_argument("Wrong number of kernel hyperparameters");
    }
  params = theta;  // copy only to allocate enough space; TODO: make this more efficient
  std::transform(theta.begin(), theta.end(), params.begin(),
                 (double (*)(double)) exp);
};
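The std::transform call exponentiates the incoming values, i.e. the hyperparameters are passed in log space, which keeps the effective parameters (e.g. kernel length-scales) strictly positive. A minimal standalone sketch of the same transform, assuming vectord is boost::numeric::ublas::vector<double>:

#include <cmath>
#include <algorithm>
#include <boost/numeric/ublas/vector.hpp>

typedef boost::numeric::ublas::vector<double> vectord;

int main()
{
  vectord theta(2);          // hyperparameters in log space
  theta(0) = 0.0;            // exp(0)  = 1.0
  theta(1) = -1.0;           // exp(-1) ~ 0.368
  vectord params = theta;    // copy first so params has the right size
  std::transform(theta.begin(), theta.end(), params.begin(),
                 (double (*)(double)) std::exp);
  return 0;
}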
Example 5: computeWeightedNorm2

inline double computeWeightedNorm2(const vectord& x1, const vectord& x2)
{
  assert(n_inputs == x1.size());
  assert(x1.size() == x2.size());
  assert(x1.size() == params.size());

  vectord xd = x1 - x2;
  vectord r = utils::ublas_elementwise_div(xd, params);
  return norm_2(r);
};
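This is the scaled distance used by ARD-style kernels: each coordinate difference is divided by its own length-scale before taking the Euclidean norm. A standalone sketch using plain Boost.uBLAS, where element_div stands in for the library's ublas_elementwise_div helper:

#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/vector_expression.hpp>

typedef boost::numeric::ublas::vector<double> vectord;

double weightedNorm2(const vectord& x1, const vectord& x2, const vectord& ell)
{
  using namespace boost::numeric::ublas;
  vectord r = element_div(x1 - x2, ell);  // per-dimension scaling
  return norm_2(r);                       // Euclidean norm of the scaled difference
}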
Example 6: setParameters

int setParameters(const vectord& params)
{
  if (params.size() != n_params)
    {
      FILE_LOG(logERROR) << "Wrong number of mean function parameters";
      return -1;
    }
  mConstParam = params(0);
  mParameters = boost::numeric::ublas::project(params,
                  boost::numeric::ublas::range(1, params.size()));
  return 0;
};
Example 7: assert

/**************************************************************
 * Procedure                                                  *
 *                                                            *
 * Description: getSigmaPoints                                *
 * Class      : UnscentedExpectedImprovement                  *
 **************************************************************/
void UnscentedExpectedImprovement::getSigmaPoints(const vectord& x,
                                                  const double scale,
                                                  const int dim,
                                                  const matrixd& matrix_noise,
                                                  std::vector<vectord>& xx,
                                                  std::vector<double>& w,
                                                  const bool matrix_convert)
{
  const size_t n = dim;
  assert(matrix_noise.size1() == n);
  assert(matrix_noise.size2() == n);
  assert(x.size() == n);

  matrixd px;
  if (matrix_convert)
    px = UnscentedExpectedImprovement::convertMatrixNoise(matrix_noise, scale, dim);
  else
    px = matrix_noise;

  // Output variable initialization
  xx = std::vector<vectord>();
  w  = std::vector<double>();
  xx.push_back(x);
  w .push_back(scale / (dim + scale));

  // Calculate query_i: one symmetric pair of sigma points per column
  for (size_t col = 0; col < n; col += 1)
    {
      xx.push_back(x - boost::numeric::ublas::column(px, col));
      xx.push_back(x + boost::numeric::ublas::column(px, col));
      w .push_back(0.5 / (dim + scale));
      w .push_back(0.5 / (dim + scale));
    }
}
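For intuition: the unscented transform produces 2n+1 sigma points for an n-dimensional input x, namely x itself plus a symmetric pair per column of a scaled square root of the noise covariance, with weights scale/(n+scale) and 0.5/(n+scale) that sum to 1. A minimal self-contained sketch for the special case of a diagonal covariance (diagSigmaPoints is a hypothetical name; vectord/matrixd are the usual Boost.uBLAS typedefs):

#include <vector>
#include <cmath>
#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/matrix_proxy.hpp>

typedef boost::numeric::ublas::vector<double> vectord;
typedef boost::numeric::ublas::matrix<double> matrixd;

// Sigma points for a diagonal covariance: px = sqrt((n+scale) * Sigma),
// computed element-wise because Sigma is diagonal here.
void diagSigmaPoints(const vectord& x, double scale, const vectord& diag_var,
                     std::vector<vectord>& xx, std::vector<double>& w)
{
  const size_t n = x.size();
  matrixd px = boost::numeric::ublas::zero_matrix<double>(n, n);
  for (size_t i = 0; i < n; ++i)
    px(i, i) = std::sqrt((n + scale) * diag_var(i));

  xx.clear(); w.clear();
  xx.push_back(x);                     // central point
  w.push_back(scale / (n + scale));
  for (size_t col = 0; col < n; ++col)
    {
      xx.push_back(x - boost::numeric::ublas::column(px, col));
      xx.push_back(x + boost::numeric::ublas::column(px, col));
      w.push_back(0.5 / (n + scale));
      w.push_back(0.5 / (n + scale));
    }
}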
Example 8: exit

/**************************************************************
 * Procedure                                                  *
 *                                                            *
 * Description: chooseActiveVariables                         *
 * Class      : iCubOptimizable                               *
 **************************************************************/
void iCubOptimizable::chooseActiveVariables(vectord& query)
{
  if (dim != query.size())
    {
      cout << endl << "[ERROR] Query size does not match the number of active"
                      " mask components. From: iCubOptimizable::chooseActiveVariables.";
      exit(-1);
    }

  uint variables_updated = 0;
  vectord result = vectord(_original_dim, 0.0);
  for (uint index = 0; index < _original_dim; index += 1)
    {
      if (_active_variables_mask[index] == true)
        {
          // Active dimension: take the next value from the reduced query.
          result[index] = query[variables_updated];
          variables_updated += 1;
        }
      else
        {
          // Inactive dimension: fall back to the default query value.
          result[index] = _icubparams.default_query[index];
        }
    }

  // Return the expanded query
  query = result;
}
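The pattern here, expanding a reduced query over a boolean mask of active dimensions while filling inactive ones with defaults, is easy to test in isolation. A minimal standalone sketch (expandQuery is a hypothetical name; vectord is boost::numeric::ublas::vector<double>):

#include <vector>
#include <boost/numeric/ublas/vector.hpp>

typedef boost::numeric::ublas::vector<double> vectord;

// Expand `reduced` (one value per active dimension) into a full query,
// taking inactive dimensions from `defaults`.
vectord expandQuery(const vectord& reduced,
                    const std::vector<bool>& mask,
                    const vectord& defaults)
{
  vectord full(defaults);         // start from the default query
  size_t next = 0;
  for (size_t i = 0; i < mask.size(); ++i)
    if (mask[i])
      full(i) = reduced(next++);  // consume reduced values in order
  return full;
}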
Example 9: setSamples

void Dataset::setSamples(const vectord& y)
{
  mY = y;
  for (size_t i = 0; i < y.size(); ++i)
    {
      updateMinMax(i);
    }
};
Example 10: computeCrossCorrelation

inline void KernelModel::computeCrossCorrelation(const vecOfvec& XX,
                                                 const vectord& query,
                                                 vectord& knx)
{
  // knx(i) = k(XX[i], query) for every sample in the dataset
  std::vector<vectord>::const_iterator x_it = XX.begin();
  vectord::iterator k_it = knx.begin();
  while (x_it != XX.end())
    {
      *k_it++ = (*mKernel)(*x_it++, query);
    }
}
Example 11: getFeatures

vectord getFeatures(const vectord& x)
{
  using boost::numeric::ublas::range;
  using boost::numeric::ublas::project;

  // Prepend a constant bias term: res = [1, x]
  vectord res(x.size() + 1);
  res(0) = 1;
  project(res, range(1, res.size())) = x;
  return res;
};
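A quick usage check: for x = (2, 3) the feature vector is (1, 2, 3). The sketch below assumes a free-function version of getFeatures (the snippet above shows it as a class member) and that vectord is boost::numeric::ublas::vector<double>:

#include <iostream>
#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/vector_proxy.hpp>
#include <boost/numeric/ublas/io.hpp>

typedef boost::numeric::ublas::vector<double> vectord;

int main()
{
  vectord x(2);
  x(0) = 2; x(1) = 3;
  std::cout << getFeatures(x) << std::endl;  // prints [3](1,2,3)
  return 0;
}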
Example 12: setKernelPrior

inline void KernelModel::setKernelPrior(const vectord& theta,
                                        const vectord& s_theta)
{
  // One independent Gaussian prior per kernel hyperparameter
  for (size_t i = 0; i < theta.size(); ++i)
    {
      boost::math::normal n(theta(i), s_theta(i));
      priorKernel.push_back(n);
    }
};
Example 13: gauss

double gauss(const vectord& x, const vectord& mu, const matrixd& sigma)
{
  double n = static_cast<double>(x.size());
  const vectord vd = x - mu;

  matrixd invS = sigma;  // sized like sigma; filled by inverse_cholesky
  bayesopt::utils::inverse_cholesky(sigma, invS);
  matrixd sig = sigma;

  // Multivariate normal density:
  //   f(x) = exp(-0.5 (x-mu)' S^-1 (x-mu)) / ((2*pi)^(n/2) * |S|^(1/2))
  return exp(-0.5 * inner_prod(vd, prod(invS, vd)))
         / (pow(2 * M_PI, n / 2) * pow(determinant(sig), 0.5));
}
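As a sanity check on the normalization constant, here is a minimal self-contained 1-D version (Sigma = sigma^2): at x = mu the density must equal 1/sqrt(2*pi*sigma^2), roughly 0.39894 for sigma = 1. gauss1d is a hypothetical name.

#include <cmath>
#include <cstdio>

// 1-D Gaussian density: f(x) = exp(-0.5*((x-mu)/s)^2) / (s*sqrt(2*pi))
double gauss1d(double x, double mu, double s)
{
  const double z = (x - mu) / s;
  return std::exp(-0.5 * z * z) / (s * std::sqrt(2.0 * M_PI));
}

int main()
{
  std::printf("%f\n", gauss1d(0.0, 0.0, 1.0));  // ~0.398942
  return 0;
}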
Example 14: operator

double operator()(const vectord& x)
{
  ++nCalls;
  size_t nDims = x.size();

  // Exploration coefficient that grows with the number of calls,
  // in the style of GP-UCB confidence bounds.
  double beta = sqrt(2 * log(static_cast<double>(nCalls * nCalls)) * (nDims + 1)
                     + log(static_cast<double>(nDims)) * nDims * mCoef);

  ProbabilityDistribution* d_ = mProc->prediction(x);
  return d_->lowerConfidenceBound(beta);
};
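To see how the exploration weight evolves, here is a tiny standalone computation of beta for a few call counts; the values of nDims and mCoef are assumptions for illustration only.

#include <cmath>
#include <cstdio>

int main()
{
  const double nDims = 2.0, mCoef = 1.0;  // assumed values
  for (int t = 1; t <= 1000; t *= 10)
    {
      double beta = std::sqrt(2.0 * std::log(static_cast<double>(t) * t) * (nDims + 1)
                              + std::log(nDims) * nDims * mCoef);
      std::printf("t=%4d  beta=%.3f\n", t, beta);  // beta grows like sqrt(log t)
    }
  return 0;
}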
Example 15: assert

double NLOPT_Optimization::localTrialAround(vectord& Xnext)
{
  assert(mDown.size() == Xnext.size());
  assert(mUp.size() == Xnext.size());

  const size_t n = Xnext.size();
  for (size_t i = 0; i < n; ++i)
    {
      if (Xnext(i) < mDown[i] || Xnext(i) > mUp[i])
        {
          FILE_LOG(logDEBUG) << Xnext;
          throw std::invalid_argument("Local trial without proper"
                                      " initial point.");
        }
    }

  nlopt::algorithm algo = nlopt::LN_BOBYQA;
  eval_func fpointer = &(NLOPT_Optimization::evaluate_nlopt);
  void* objPointer = static_cast<void*>(rbobj);
  const size_t nIter = 20;
  // std::vector<double> vd(n);
  // std::vector<double> vu(n);

  // for (size_t i = 0; i < n; ++i)
  //   {
  //     vd[i] = Xnext(i) - 0.01;
  //     vu[i] = Xnext(i) + 0.01;
  //   }

  vectord start = Xnext;
  double fmin = run_nlopt(algo, fpointer, Xnext, nIter,
                          mDown, mUp, objPointer);
  FILE_LOG(logDEBUG) << "Near trial " << nIter << "|"
                     << start << "-> " << Xnext << " f() -> " << fmin;
  return fmin;
}