This article collects typical usage examples of the C++ method MatrixXf::cols. If you are unsure what MatrixXf::cols does, how to call it, or where to find real-world usage, the curated code samples below may help. You can also explore further usage examples of MatrixXf, the class this method belongs to.
The following shows 15 code examples of the MatrixXf::cols method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code samples.
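Before the project-specific examples, here is a minimal self-contained sketch (written for this page, not taken from any of the projects below) showing what MatrixXf::cols() returns and the per-column loop pattern most of the examples rely on:

#include <iostream>
#include <Eigen/Dense>

using Eigen::MatrixXf;

int main()
{
    MatrixXf m = MatrixXf::Random(3, 5);          // 3 rows, 5 columns
    std::cout << "cols = " << m.cols() << '\n';   // prints 5

    // Typical pattern seen in the examples below: iterate column by column
    for (int i = 0; i < m.cols(); ++i)
        m.col(i).normalize();

    return 0;
}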
Example 1: doContinousHPI
void FiffSimulator::doContinousHPI(MatrixXf& matData)
{
    //This only works with babyMEG HPI channels 400 ... 407
    if(m_pFiffInfo && m_pHPIWidget && matData.rows() >= 407) {
        if(m_pHPIWidget->wasLastFitOk()) {
            // Load device to head transformation matrix from Fiff info
            QMatrix3x3 rot;

            for(int ir = 0; ir < 3; ir++) {
                for(int ic = 0; ic < 3; ic++) {
                    rot(ir,ic) = m_pFiffInfo->dev_head_t.trans(ir,ic);
                }
            }

            QQuaternion quatHPI = QQuaternion::fromRotationMatrix(rot);

            // Write rotation quaternion to HPI Ch #1~3
            matData.row(401) = MatrixXf::Constant(1,matData.cols(), quatHPI.x());
            matData.row(402) = MatrixXf::Constant(1,matData.cols(), quatHPI.y());
            matData.row(403) = MatrixXf::Constant(1,matData.cols(), quatHPI.z());

            // Write translation vector to HPI Ch #4~6
            matData.row(404) = MatrixXf::Constant(1,matData.cols(), m_pFiffInfo->dev_head_t.trans(0,3));
            matData.row(405) = MatrixXf::Constant(1,matData.cols(), m_pFiffInfo->dev_head_t.trans(1,3));
            matData.row(406) = MatrixXf::Constant(1,matData.cols(), m_pFiffInfo->dev_head_t.trans(2,3));

            // Write goodness of fit (GOF) to HPI Ch #7
            float dpfitError = 0.0;
            float GOF = 1 - dpfitError;
            matData.row(407) = MatrixXf::Constant(1,matData.cols(), GOF);
        }
    }
}
Example 2: transformPoints
MatrixXf transformPoints(Matrix3f X, MatrixXf P){
    // Lift the 2xN points to homogeneous coordinates
    MatrixXf Pfull(3, P.cols());
    for(int i=0; i<P.cols(); i++){
        Pfull(0, i) = P(0, i);
        Pfull(1, i) = P(1, i);
        Pfull(2, i) = 1;
    }

    // Apply the 3x3 transform
    Pfull = X*Pfull;

    // Drop the homogeneous coordinate again
    MatrixXf Pt(2, P.cols());
    for(int i=0; i<P.cols(); i++){
        Pt(0, i) = Pfull(0, i);
        Pt(1, i) = Pfull(1, i);
    }
    return Pt;
}
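A brief usage sketch (the values below are illustrative, not from the original project): transformPoints expects a 2xN matrix of points, one point per column, and a 3x3 transform in homogeneous coordinates.

// Illustrative usage only: rotate three 2-D points by 90 degrees about the origin
Eigen::Matrix3f X;
X << 0, -1, 0,
     1,  0, 0,
     0,  0, 1;
Eigen::MatrixXf P(2, 3);                     // one point per column
P << 1, 2, 3,
     0, 0, 0;
Eigen::MatrixXf Pt = transformPoints(X, P);  // Pt is again 2x3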
Example 3: evaluate
double IntersectionOverUnion::evaluate( MatrixXf & d_mul_Q, const MatrixXf & Q ) const {
    assert( gt_.rows() == Q.cols() );
    const int N = Q.cols(), M = Q.rows();
    d_mul_Q = 0*Q;
    VectorXd in(M), un(M);
    in.fill(0.f);
    un.fill(1e-20);
    for( int i=0; i<N; i++ ) {
        if( 0 <= gt_[i] && gt_[i] < M ) {
            in[ gt_[i] ] += Q(gt_[i],i);
            un[ gt_[i] ] += 1;
            for( int l=0; l<M; l++ )
                if( l!=gt_[i] )
                    un[ l ] += Q(l,i);
        }
    }
    for( int i=0; i<N; i++ )
        if( 0 <= gt_[i] && gt_[i] < M ) {
            for( int l=0; l<M; l++ )
                if( l==gt_[i] )
                    d_mul_Q(l,i) = Q(l,i) / (un[l]*M);
                else
                    d_mul_Q(l,i) = - Q(l,i) * in[l] / ( un[l] * un[l] * M);
        }
    return (in.array()/un.array()).sum()/M;
}
Example 4: featureGradient
MatrixXf featureGradient( const MatrixXf & a, const MatrixXf & b ) const {
    if (ntype_ == NO_NORMALIZATION )
        return kernelGradient( a, b );
    else if (ntype_ == NORMALIZE_SYMMETRIC ) {
        MatrixXf fa = lattice_.compute( a*norm_.asDiagonal(), true );
        MatrixXf fb = lattice_.compute( b*norm_.asDiagonal() );
        MatrixXf ones = MatrixXf::Ones( a.rows(), a.cols() );
        VectorXf norm3 = norm_.array()*norm_.array()*norm_.array();
        MatrixXf r = kernelGradient( 0.5*( a.array()*fb.array() + fa.array()*b.array() ).matrix()*norm3.asDiagonal(), ones );
        return - r + kernelGradient( a*norm_.asDiagonal(), b*norm_.asDiagonal() );
    }
    else if (ntype_ == NORMALIZE_AFTER ) {
        MatrixXf fb = lattice_.compute( b );
        MatrixXf ones = MatrixXf::Ones( a.rows(), a.cols() );
        VectorXf norm2 = norm_.array()*norm_.array();
        MatrixXf r = kernelGradient( ( a.array()*fb.array() ).matrix()*norm2.asDiagonal(), ones );
        return - r + kernelGradient( a*norm_.asDiagonal(), b );
    }
    else /*if (ntype_ == NORMALIZE_BEFORE )*/ {
        MatrixXf fa = lattice_.compute( a, true );
        MatrixXf ones = MatrixXf::Ones( a.rows(), a.cols() );
        VectorXf norm2 = norm_.array()*norm_.array();
        MatrixXf r = kernelGradient( ( fa.array()*b.array() ).matrix()*norm2.asDiagonal(), ones );
        return -r+kernelGradient( a, b*norm_.asDiagonal() );
    }
}
Example 5: sumAndNormalize
void sumAndNormalize( MatrixXf & out, const MatrixXf & in, const MatrixXf & Q ) {
    out.resize( in.rows(), in.cols() );
    for( int i=0; i<in.cols(); i++ ){
        VectorXf b = in.col(i);
        VectorXf q = Q.col(i);
        out.col(i) = b.array().sum()*q - b;
    }
}
Example 6: expAndNormalize
///////////////////////
/////  Inference  /////
///////////////////////
void expAndNormalize ( MatrixXf & out, const MatrixXf & in ) {
    out.resize( in.rows(), in.cols() );
    for( int i=0; i<out.cols(); i++ ){
        VectorXf b = in.col(i);
        b.array() -= b.maxCoeff();
        b = b.array().exp();
        out.col(i) = b / b.array().sum();
    }
}
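Design note: expAndNormalize is a numerically stable column-wise softmax. Subtracting each column's maximum before exponentiating prevents overflow, and dividing by the column sum turns every column of out into a probability distribution over labels.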
Example 7: compute
void Permutohedral::compute ( MatrixXf & out, const MatrixXf & in, bool reverse ) const
{
    if( out.cols() != in.cols() || out.rows() != in.rows() )
        out = 0*in;
    if( in.rows() <= 2 )
        seqCompute( out.data(), in.data(), in.rows(), reverse );
    else
        sseCompute( out.data(), in.data(), in.rows(), reverse );
}
Example 8: blas_gemm
void blas_gemm(const MatrixXf& a, const MatrixXf& b, MatrixXf& c)
{
    int M = c.rows(); int N = c.cols(); int K = a.cols();
    int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();

    sgemm_(&notrans,&notrans,&M,&N,&K,&fone,
           const_cast<float*>(a.data()),&lda,
           const_cast<float*>(b.data()),&ldb,&fone,
           c.data(),&ldc);
}
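The snippet calls sgemm_ with the file-scope helpers notrans and fone, which were not captured above. A minimal sketch of what those declarations presumably look like (the exact definitions in the original benchmark file are an assumption here):

// Assumed declarations, not part of the captured snippet:
extern "C" void sgemm_(const char*, const char*, const int*, const int*, const int*,
                       const float*, const float*, const int*,
                       const float*, const int*, const float*, float*, const int*);

static char  notrans = 'N';   // 'N' = do not transpose either operand
static float fone    = 1.0f;  // alpha = beta = 1, i.e. c = a*b + c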
Example 9: datapoint
vector<float> applyPCAtoVector(vector<float> &descriptorValues, MatrixXf &eigen_vects)
{
    MatrixXf datapoint(1,descriptorValues.size());
    for (int i = 0; i < descriptorValues.size(); ++i)
        datapoint(0,i) = descriptorValues[i];

    MatrixXf reduceddatapnt = pca::transformPointMatrix(datapoint, eigen_vects);

    vector<float> retfeatvect(reduceddatapnt.cols());
    for (int i = 0; i < reduceddatapnt.cols(); ++i)
        retfeatvect[i] = reduceddatapnt(0,i);
    return retfeatvect;
}
Example 10: datapoints
vector<vector<float> > applyPCAtoVector2D(vector<vector<float> > &descriptorValues, MatrixXf &eigen_vects)
{
    MatrixXf datapoints(descriptorValues.size(),descriptorValues[0].size());
    for (int i = 0; i < descriptorValues.size(); ++i)
        for (int j = 0; j < descriptorValues[0].size(); ++j)
            datapoints(i, j) = descriptorValues[i][j];

    MatrixXf reduceddatapnts = pca::transformPointMatrix(datapoints, eigen_vects);

    vector<vector<float> > retfeatvects(reduceddatapnts.rows(), vector<float>(reduceddatapnts.cols()));
    for (int i = 0; i < reduceddatapnts.rows(); ++i)
        for (int j = 0; j < reduceddatapnts.cols(); ++j)
            retfeatvects[i][j] = reduceddatapnts(i,j);
    return retfeatvects;
}
Example 11: run
void Neuromag::run()
{
    MatrixXf matValue;
    qint32 size = 0;

    while(m_bIsRunning) {
        if(m_pRawMatrixBuffer_In) {
            //pop matrix
            matValue = m_pRawMatrixBuffer_In->pop();

            //Write raw data to fif file
            if(m_bWriteToFile) {
                size += matValue.rows()*matValue.cols() * 4;

                if(size > MAX_DATA_LEN) {
                    size = 0;
                    this->splitRecordingFile();
                }

                m_mutex.lock();
                if(m_pOutfid) {
                    m_pOutfid->write_raw_buffer(matValue.cast<double>());
                }
                m_mutex.unlock();
            } else {
                size = 0;
            }

            if(m_pRTMSA_Neuromag) {
                m_pRTMSA_Neuromag->data()->setValue(this->calibrate(matValue));
            }
        }
    }
}
Example 12: projectDirections
void RealtimeMF_openni::projectDirections(cv::Mat& I, const MatrixXf& dirs,
    double f_d, const Matrix<uint8_t,Dynamic,Dynamic>& colors)
{
    double scale = 0.1;
    VectorXf p0(3); p0 << 0.35,0.25,1;
    double u0 = p0(0)/p0(2)*f_d + 320.;
    double v0 = p0(1)/p0(2)*f_d + 240.;

    for(uint32_t k=0; k < dirs.cols(); ++k)
    {
        VectorXf p1 = p0 + dirs.col(k)*scale;
        double u1 = p1(0)/p1(2)*f_d + 320.;
        double v1 = p1(1)/p1(2)*f_d + 240.;

        cv::line(I, cv::Point(u0,v0), cv::Point(u1,v1),
            CV_RGB(colors(k,0),colors(k,1),colors(k,2)), 2, CV_AA);

        double arrowLen = 10.;
        double angle = atan2(v1-v0,u1-u0);
        double ru1 = u1 - arrowLen*cos(angle + M_PI*0.25);
        double rv1 = v1 - arrowLen*sin(angle + M_PI*0.25);
        cv::line(I, cv::Point(u1,v1), cv::Point(ru1,rv1),
            CV_RGB(colors(k,0),colors(k,1),colors(k,2)), 2, CV_AA);

        ru1 = u1 - arrowLen*cos(angle - M_PI*0.25);
        rv1 = v1 - arrowLen*sin(angle - M_PI*0.25);
        cv::line(I, cv::Point(u1,v1), cv::Point(ru1,rv1),
            CV_RGB(colors(k,0),colors(k,1),colors(k,2)), 2, CV_AA);
    }
    cv::circle(I, cv::Point(u0,v0), 2, CV_RGB(0,0,0), 2, CV_AA);
}
Example 13: logsumexp
VectorXf EMclustering::logsumexp(MatrixXf x, int dim)
{
    int r = x.rows();
    int c = x.cols();

    VectorXf y(r);
    MatrixXf tmp1(r,c);
    VectorXf tmp2(r);
    VectorXf s(r);

    y = x.rowwise().maxCoeff();//cerr<<"y"<<y<<endl<<endl;
    x = x.colwise() - y;
    //cerr<<"x"<<x<<endl<<endl;
    tmp1 = x.array().exp();
    //cerr<<"t"<<tmp1<<endl<<endl;
    tmp2 = tmp1.rowwise().sum();
    //cerr<<"t"<<tmp2<<endl<<endl;
    s = y.array() + tmp2.array().log();

    for(int i=0;i<s.size();i++)
    {
        if(!isfinite(s(i)))
        {
            s(i) = y(i);
        }
    }

    y.resize(0);
    tmp1.resize(0,0);
    tmp2.resize(0);

    return s;
}
Example 14: createDigTrig
void BabyMEG::createDigTrig(MatrixXf& data)
{
    //Look for triggers in all trigger channels
    //m_qMapDetectedTrigger = DetectTrigger::detectTriggerFlanksMax(data.at(b), m_lTriggerChannelIndices, m_iCurrentSample-nCol, m_dTriggerThreshold, true);
    QMap<int,QList<QPair<int,double> > > qMapDetectedTrigger = DetectTrigger::detectTriggerFlanksGrad(data.cast<double>(), m_lTriggerChannelIndices, 0, 3.0, false, "Rising");

    //Combine and write results into data block's digital trigger channel
    QMapIterator<int,QList<QPair<int,double> > > i(qMapDetectedTrigger);
    int counter = 0;
    int idxDigTrig = m_pFiffInfo->ch_names.indexOf("DTRG01");

    while (i.hasNext())
    {
        i.next();

        QList<QPair<int,double> > lDetectedTriggers = i.value();

        for(int k = 0; k < lDetectedTriggers.size(); ++k)
        {
            if(lDetectedTriggers.at(k).first < data.cols() && lDetectedTriggers.at(k).first >= 0)
            {
                data(idxDigTrig,lDetectedTriggers.at(k).first) = data(idxDigTrig,lDetectedTriggers.at(k).first) + pow(2,counter);
            }
        }

        counter++;
    }
}
Example 15: normalizeEigenFaces
/**
 * Normalizes each eigenface in a matrix.
 *
 * @param eigenfaces A matrix of eigen faces to normalize
 */
void normalizeEigenFaces(MatrixXf &eigenfaces)
{
    for(int i = 0; i < eigenfaces.cols(); i++)
    {
        eigenfaces.col(i).normalize();
    }
}