This article collects typical usage examples of the C++ method api::IFunction_sptr::nParams. If you have been wondering exactly how C++ IFunction_sptr::nParams is used, or what it looks like in real code, the selected examples below may help. You can also explore further usage examples of the owning class, api::IFunction_sptr.
Five code examples of the IFunction_sptr::nParams method are shown below, sorted by popularity by default.
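Before the examples, here is a minimal sketch of the basic nParams pattern: create a fit function through the FunctionFactory and loop over its parameters by index. The function name "Gaussian" and the printing are illustrative assumptions; any registered fit function exposes the same accessors.

#include <cstddef>
#include <iostream>
#include "MantidAPI/FunctionFactory.h"
#include "MantidAPI/IFunction.h"

using namespace Mantid::API;

void printParameters() {
  // "Gaussian" is only an illustrative choice of registered fit function.
  IFunction_sptr func = FunctionFactory::Instance().createFunction("Gaussian");
  // nParams() returns the number of declared parameters;
  // valid indices run from 0 to nParams() - 1.
  for (std::size_t i = 0; i < func->nParams(); ++i) {
    std::cout << func->parameterName(i) << " = " << func->getParameter(i)
              << "\n";
  }
}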
Example 1: logic_error
/// Constructor
FunctionGenerator::FunctionGenerator(API::IFunction_sptr source)
    : m_source(source), m_nOwnParams(source->nParams()), m_dirty(true) {
  if (!m_source) {
    throw std::logic_error(
        "FunctionGenerator initialised with null source function.");
  }
  declareAttribute("NumDeriv", Attribute(false));
}
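FunctionGenerator stores the source function and caches its parameter count once at construction; note that the member initializer dereferences source before the null check executes. Below is a simplified sketch of the same caching idea that guards the dereference; the class and member names are illustrative, not the real Mantid declaration.

#include <cstddef>
#include <utility>
#include "MantidAPI/IFunction.h"

// Illustrative wrapper only: cache the wrapped function's parameter count so
// later parameter-index arithmetic does not have to call nParams() each time.
class SourceWrapper {
public:
  explicit SourceWrapper(Mantid::API::IFunction_sptr source)
      : m_source(std::move(source)),
        m_nSourceParams(m_source ? m_source->nParams() : 0) {}
  std::size_t nSourceParams() const { return m_nSourceParams; }

private:
  Mantid::API::IFunction_sptr m_source;
  std::size_t m_nSourceParams;
};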
Example 2: fitBackgroundFunction
/** Fit background function
 */
void ProcessBackground::fitBackgroundFunction(std::string bkgdfunctiontype) {
  // Get background type and create background function
  BackgroundFunction_sptr bkgdfunction =
      createBackgroundFunction(bkgdfunctiontype);

  int bkgdorder = getProperty("OutputBackgroundOrder");
  bkgdfunction->setAttributeValue("n", bkgdorder);

  if (bkgdfunctiontype == "Chebyshev") {
    double xmin = m_outputWS->readX(0).front();
    double xmax = m_outputWS->readX(0).back();
    g_log.information() << "Chebyshev Fit range: " << xmin << ", " << xmax
                        << "\n";
    bkgdfunction->setAttributeValue("StartX", xmin);
    bkgdfunction->setAttributeValue("EndX", xmax);
  }

  g_log.information() << "Fit selected background " << bkgdfunctiontype
                      << " to data workspace with "
                      << m_outputWS->getNumberHistograms() << " spectra."
                      << "\n";

  // Fit input (a few) background points to get initial guess
  API::IAlgorithm_sptr fit;
  try {
    fit = this->createChildAlgorithm("Fit", 0.9, 1.0, true);
  } catch (Exception::NotFoundError &) {
    g_log.error() << "Requires CurveFitting library." << std::endl;
    throw;
  }

  g_log.information() << "Fitting background function: "
                      << bkgdfunction->asString() << "\n";

  double startx = m_lowerBound;
  double endx = m_upperBound;
  fit->setProperty("Function",
                   boost::dynamic_pointer_cast<API::IFunction>(bkgdfunction));
  fit->setProperty("InputWorkspace", m_outputWS);
  fit->setProperty("WorkspaceIndex", 0);
  fit->setProperty("MaxIterations", 500);
  fit->setProperty("StartX", startx);
  fit->setProperty("EndX", endx);
  fit->setProperty("Minimizer", "Levenberg-MarquardtMD");
  fit->setProperty("CostFunction", "Least squares");

  fit->executeAsChildAlg();

  // Get fit status and chi^2
  std::string fitStatus = fit->getProperty("OutputStatus");
  bool allowedfailure = (fitStatus.find("cannot") < fitStatus.size()) &&
                        (fitStatus.find("tolerance") < fitStatus.size());
  if (fitStatus.compare("success") != 0 && !allowedfailure) {
    g_log.error() << "ProcessBackground: Fit Status = " << fitStatus
                  << ". Not to update fit result" << std::endl;
    throw std::runtime_error("Bad Fit");
  }

  const double chi2 = fit->getProperty("OutputChi2overDoF");
  g_log.information() << "Fit background: Fit Status = " << fitStatus
                      << ", chi2 = " << chi2 << "\n";

  // Get out the parameter names
  API::IFunction_sptr funcout = fit->getProperty("Function");
  TableWorkspace_sptr outbkgdparws = boost::make_shared<TableWorkspace>();
  outbkgdparws->addColumn("str", "Name");
  outbkgdparws->addColumn("double", "Value");

  TableRow typerow = outbkgdparws->appendRow();
  typerow << bkgdfunctiontype << 0.;

  vector<string> parnames = funcout->getParameterNames();
  size_t nparam = funcout->nParams();
  for (size_t i = 0; i < nparam; ++i) {
    TableRow newrow = outbkgdparws->appendRow();
    newrow << parnames[i] << funcout->getParameter(i);
  }

  TableRow chi2row = outbkgdparws->appendRow();
  chi2row << "Chi-square" << chi2;

  g_log.information() << "Set table workspace (#row = "
                      << outbkgdparws->rowCount()
                      << ") to OutputBackgroundParameterTable. "
                      << "\n";

  setProperty("OutputBackgroundParameterWorkspace", outbkgdparws);

  // Set output workspace
  const MantidVec &vecX = m_outputWS->readX(0);
  const MantidVec &vecY = m_outputWS->readY(0);
  FunctionDomain1DVector domain(vecX);
  FunctionValues values(domain);
  funcout->function(domain, values);

  MantidVec &dataModel = m_outputWS->dataY(1);
  MantidVec &dataDiff = m_outputWS->dataY(2);
  for (size_t i = 0; i < dataModel.size(); ++i) {
    //......... part of the code is omitted here .........
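Once the fit has run, the refined function returned through the "Function" property can be queried by index, as in the loop above, or directly by name. A small hedged sketch, assuming funcout is that IFunction_sptr and that a parameter called "A0" exists for the chosen background type:

// Index-based access: i runs over 0 .. nParams() - 1.
for (size_t i = 0; i < funcout->nParams(); ++i) {
  g_log.information() << funcout->parameterName(i) << " = "
                      << funcout->getParameter(i) << " +/- "
                      << funcout->getError(i) << "\n";
}
// Name-based access; the parameter name "A0" is an assumption of this sketch.
double a0 = funcout->getParameter("A0");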
Example 3: copyInput
/**
 * Execute smoothing of a single spectrum.
 * @param inputWS :: A workspace to pick a spectrum from.
 * @param wsIndex :: An index of a spectrum to smooth.
 * @return :: A single-spectrum workspace with the smoothed data.
 */
API::MatrixWorkspace_sptr
WienerSmooth::smoothSingleSpectrum(API::MatrixWorkspace_sptr inputWS,
                                   size_t wsIndex) {
  size_t dataSize = inputWS->blocksize();

  // it won't work for very small workspaces
  if (dataSize < 4) {
    g_log.debug() << "No smoothing, spectrum copied." << std::endl;
    return copyInput(inputWS, wsIndex);
  }

  // Due to the way RealFFT works the input should be even-sized
  const bool isOddSize = dataSize % 2 != 0;
  if (isOddSize) {
    // add a fake value to the end to make size even
    inputWS = copyInput(inputWS, wsIndex);
    wsIndex = 0;
    auto &X = inputWS->dataX(wsIndex);
    auto &Y = inputWS->dataY(wsIndex);
    auto &E = inputWS->dataE(wsIndex);
    double dx = X[dataSize - 1] - X[dataSize - 2];
    X.push_back(X.back() + dx);
    Y.push_back(Y.back());
    E.push_back(E.back());
  }

  // the input vectors
  auto &X = inputWS->readX(wsIndex);
  auto &Y = inputWS->readY(wsIndex);
  auto &E = inputWS->readE(wsIndex);

  // Digital fourier transform works best for data oscillating around 0.
  // Fit a spline with a small number of break points to the data.
  // Make sure that the spline passes through the first and the last points
  // of the data.
  // The fitted spline will be subtracted from the data and the difference
  // will be smoothed with the Wiener filter. After that the spline will be
  // added to the smoothed data to produce the output.

  // number of spline break points, must be smaller than the data size but
  // between 2 and 10
  size_t nbreak = 10;
  if (nbreak * 3 > dataSize)
    nbreak = dataSize / 3;

  // NB. The spline mustn't fit too well to the data. If it does smoothing
  // doesn't happen.
  // TODO: it's possible that the spline is unnecessary and a simple linear
  // function will do a better job.

  g_log.debug() << "Spline break points " << nbreak << std::endl;

  // define the spline
  API::IFunction_sptr spline =
      API::FunctionFactory::Instance().createFunction("BSpline");
  auto xInterval = getStartEnd(X, inputWS->isHistogramData());
  spline->setAttributeValue("StartX", xInterval.first);
  spline->setAttributeValue("EndX", xInterval.second);
  spline->setAttributeValue("NBreak", static_cast<int>(nbreak));

  // fix the first and last parameters to the first and last data values
  spline->setParameter(0, Y.front());
  spline->fix(0);
  size_t lastParamIndex = spline->nParams() - 1;
  spline->setParameter(lastParamIndex, Y.back());
  spline->fix(lastParamIndex);

  // fit the spline to the data
  auto fit = createChildAlgorithm("Fit");
  fit->initialize();
  fit->setProperty("Function", spline);
  fit->setProperty("InputWorkspace", inputWS);
  fit->setProperty("WorkspaceIndex", static_cast<int>(wsIndex));
  fit->setProperty("CreateOutput", true);
  fit->execute();

  // get the fit output workspace; spectrum 2 contains the difference that is
  // to be smoothed
  API::MatrixWorkspace_sptr fitOut = fit->getProperty("OutputWorkspace");

  // Fourier transform the difference spectrum
  auto fourier = createChildAlgorithm("RealFFT");
  fourier->initialize();
  fourier->setProperty("InputWorkspace", fitOut);
  fourier->setProperty("WorkspaceIndex", 2);
  // we don't require bin linearity as we don't need the exact transform
  fourier->setProperty("IgnoreXBins", true);
  fourier->execute();

  API::MatrixWorkspace_sptr fourierOut =
      fourier->getProperty("OutputWorkspace");

  // spectrum 2 of the transformed workspace has the transform modulus which
  // is a square
  //......... part of the code is omitted here .........
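The boundary-pinning step above relies on parameter indices running from 0 to nParams() - 1. A minimal sketch of that pattern in isolation, assuming spline is an IFunction_sptr with at least one parameter and firstValue/lastValue are the values to pin:

spline->setParameter(0, firstValue);            // first parameter
spline->fix(0);                                 // exclude it from the fit
const size_t lastIndex = spline->nParams() - 1; // last valid parameter index
spline->setParameter(lastIndex, lastValue);
spline->fix(lastIndex);
// ... run the fit ...
spline->unfix(0);                               // release the pins again if the
spline->unfix(lastIndex);                       // function is to be reused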
Example 4: addValDerivHessian
/**
 * Update the cost function, derivatives and hessian by adding values calculated
 * on a domain.
 * @param function :: Function to use to calculate the value and the derivatives
 * @param domain :: The domain.
 * @param values :: The fit function values
 * @param evalFunction :: Flag to evaluate the function
 * @param evalDeriv :: Flag to evaluate the derivatives
 * @param evalHessian :: Flag to evaluate the Hessian
 */
void CostFuncLeastSquares::addValDerivHessian(
    API::IFunction_sptr function,
    API::FunctionDomain_sptr domain,
    API::FunctionValues_sptr values,
    bool evalFunction, bool evalDeriv, bool evalHessian) const
{
  UNUSED_ARG(evalDeriv);

  size_t np = function->nParams(); // number of parameters
  size_t ny = domain->size();      // number of data points
  Jacobian jacobian(ny,np);
  if (evalFunction)
  {
    function->function(*domain,*values);
  }
  function->functionDeriv(*domain,jacobian);

  size_t iActiveP = 0;
  double fVal = 0.0;
  if (debug)
  {
    std::cerr << "Jacobian:\n";
    for(size_t i = 0; i < ny; ++i)
    {
      for(size_t ip = 0; ip < np; ++ip)
      {
        if ( !m_function->isActive(ip) ) continue;
        std::cerr << jacobian.get(i,ip) << ' ';
      }
      std::cerr << std::endl;
    }
  }

  for(size_t ip = 0; ip < np; ++ip)
  {
    if ( !function->isActive(ip) ) continue;
    double d = 0.0;
    for(size_t i = 0; i < ny; ++i)
    {
      double calc = values->getCalculated(i);
      double obs = values->getFitData(i);
      double w = values->getFitWeight(i);
      double y = ( calc - obs ) * w;
      d += y * jacobian.get(i,ip) * w;
      if (iActiveP == 0 && evalFunction)
      {
        fVal += y * y;
      }
    }
    PARALLEL_CRITICAL(der_set)
    {
      double der = m_der.get(iActiveP);
      m_der.set(iActiveP, der + d);
    }
    //std::cerr << "der " << ip << ' ' << der[iActiveP] << std::endl;
    ++iActiveP;
  }

  if (evalFunction)
  {
    PARALLEL_ATOMIC
    m_value += 0.5 * fVal;
  }

  if (!evalHessian) return;

  //size_t na = m_der.size(); // number of active parameters
  size_t i1 = 0; // active parameter index
  for(size_t i = 0; i < np; ++i) // over parameters
  {
    if ( !function->isActive(i) ) continue;
    size_t i2 = 0; // active parameter index
    for(size_t j = 0; j <= i; ++j) // over ~ half of parameters
    {
      if ( !function->isActive(j) ) continue;
      double d = 0.0;
      for(size_t k = 0; k < ny; ++k) // over fitting data
      {
        double w = values->getFitWeight(k);
        d += jacobian.get(k,i) * jacobian.get(k,j) * w * w;
      }
      PARALLEL_CRITICAL(hessian_set)
      {
        double h = m_hessian.get(i1,i2);
        m_hessian.set(i1,i2, h + d);
        //std::cerr << "hess " << i1 << ' ' << i2 << std::endl;
        if (i1 != i2)
        {
          m_hessian.set(i2,i1,h + d);
        }
      }
      //......... part of the code is omitted here .........
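Written out, the loops above accumulate the standard half-sum-of-squares cost together with its gradient and Gauss-Newton Hessian. With c_i the calculated value, o_i the observed value, w_i the fit weight and J_{ip} the Jacobian entry, this is simply a restatement of the code in LaTeX notation (the second-derivative terms of the exact Hessian are dropped, and only the lower triangle j <= i is accumulated before being mirrored):

\chi^2 = \frac{1}{2}\sum_i \bigl(w_i\,(c_i - o_i)\bigr)^2, \qquad
\frac{\partial \chi^2}{\partial p} = \sum_i w_i^2\,(c_i - o_i)\,J_{ip}, \qquad
H_{pq} \approx \sum_i w_i^2\, J_{ip}\, J_{iq}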
Example 5: addValDerivHessian
/**
 * Update the cost function, derivatives and hessian by adding values calculated
 * on a domain.
 * @param function :: Function to use to calculate the value and the derivatives
 * @param domain :: The domain.
 * @param values :: The fit function values
 * @param evalDeriv :: Flag to evaluate the derivatives
 * @param evalHessian :: Flag to evaluate the Hessian
 */
void CostFuncLeastSquares::addValDerivHessian(API::IFunction_sptr function,
                                              API::FunctionDomain_sptr domain,
                                              API::FunctionValues_sptr values,
                                              bool evalDeriv,
                                              bool evalHessian) const {
  UNUSED_ARG(evalDeriv);
  function->function(*domain, *values);

  size_t np = function->nParams(); // number of parameters
  size_t ny = values->size();      // number of data points
  Jacobian jacobian(ny, np);
  function->functionDeriv(*domain, jacobian);

  size_t iActiveP = 0;
  double fVal = 0.0;
  std::vector<double> weights = getFitWeights(values);

  for (size_t ip = 0; ip < np; ++ip) {
    if (!function->isActive(ip))
      continue;
    double d = 0.0;
    for (size_t i = 0; i < ny; ++i) {
      double calc = values->getCalculated(i);
      double obs = values->getFitData(i);
      double w = weights[i];
      double y = (calc - obs) * w;
      d += y * jacobian.get(i, ip) * w;
      if (iActiveP == 0) {
        fVal += y * y;
      }
    }
    PARALLEL_CRITICAL(der_set) {
      double der = m_der.get(iActiveP);
      m_der.set(iActiveP, der + d);
    }
    ++iActiveP;
  }

  PARALLEL_ATOMIC
  m_value += 0.5 * fVal;

  if (!evalHessian)
    return;

  size_t i1 = 0; // active parameter index
  for (size_t i = 0; i < np; ++i) // over parameters
  {
    if (!function->isActive(i))
      continue;
    size_t i2 = 0; // active parameter index
    for (size_t j = 0; j <= i; ++j) // over ~ half of parameters
    {
      if (!function->isActive(j))
        continue;
      double d = 0.0;
      for (size_t k = 0; k < ny; ++k) // over fitting data
      {
        double w = weights[k];
        d += jacobian.get(k, i) * jacobian.get(k, j) * w * w;
      }
      PARALLEL_CRITICAL(hessian_set) {
        double h = m_hessian.get(i1, i2);
        m_hessian.set(i1, i2, h + d);
        if (i1 != i2) {
          m_hessian.set(i2, i1, h + d);
        }
      }
      ++i2;
    }
    ++i1;
  }
}
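Both overloads begin by evaluating the model on the supplied domain via function->function(*domain, *values). Outside of a cost function, the same evaluation step looks roughly like the hedged sketch below; the x grid is illustrative and the header paths assume the usual Mantid layout.

#include <cstddef>
#include <vector>
#include "MantidAPI/FunctionDomain1D.h"
#include "MantidAPI/FunctionValues.h"
#include "MantidAPI/IFunction.h"

// Evaluate a fit function on an arbitrary x grid and read the results back.
void evaluateOnGrid(Mantid::API::IFunction_sptr function) {
  std::vector<double> x = {0.0, 0.5, 1.0, 1.5};  // illustrative x values
  Mantid::API::FunctionDomain1DVector domain(x); // wraps the x grid
  Mantid::API::FunctionValues values(domain);    // output buffer sized to the domain
  function->function(domain, values);            // fill values with the model
  for (std::size_t i = 0; i < values.size(); ++i) {
    double yCalc = values.getCalculated(i);      // model value at x[i]
    (void)yCalc;                                 // ... use yCalc here ...
  }
}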