This article collects typical usage examples of the C++ method MatrixT::cols. If you are unsure how MatrixT::cols is used in practice, or are looking for concrete examples, the curated code samples below may help. You can also browse more usage examples of the class MatrixT, to which this method belongs.
Four code examples of MatrixT::cols are shown below, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better C++ code samples.
Example 1: computeInverseOperation
bool WHeadPositionCorrection::computeInverseOperation( MatrixT* const g, const MatrixT& lf ) const
{
    WLTimeProfiler profiler( CLASS, __func__, true );

    const float snr = 25;
    const MatrixT noiseCov = MatrixT::Identity( lf.rows(), lf.rows() );

    // Leadfield transpose matrix
    const MatrixT LT = lf.transpose();

    // WinvLT = W^-1 * LT
    SpMatrixT w = SpMatrixT( lf.cols(), lf.cols() );
    w.setIdentity();
    w.makeCompressed();

    SparseLU< SpMatrixT > spSolver;
    spSolver.compute( w );
    if( spSolver.info() != Eigen::Success )
    {
        wlog::error( CLASS ) << "spSolver.compute( weighting ) did not succeed: " << spSolver.info();
        return false;
    }
    const MatrixT WinvLT = spSolver.solve( LT ); // needs a dense matrix, returns a dense matrix
    if( spSolver.info() != Eigen::Success )
    {
        wlog::error( CLASS ) << "spSolver.solve( LT ) did not succeed: " << spSolver.info();
        return false;
    }
    wlog::debug( CLASS ) << "WinvLT " << WinvLT.rows() << " x " << WinvLT.cols();

    // LWL = L * W^-1 * LT
    const MatrixT LWL = lf * WinvLT;
    wlog::debug( CLASS ) << "LWL " << LWL.rows() << " x " << LWL.cols();

    // alpha = sqrt( trace( LWL ) / ( snr * num_sensors ) )
    const double alpha = sqrt( LWL.trace() / ( snr * lf.rows() ) );

    // G = W^-1 * LT * inv( ( L * W^-1 * LT ) + alpha^2 * Cn )
    const MatrixT toInv = LWL + pow( alpha, 2 ) * noiseCov;
    const MatrixT inv = toInv.inverse();
    *g = WinvLT * inv;

    WAssertDebug( g->rows() == lf.cols() && g->cols() == lf.rows(), "Dimensions of G and L do not match." );
    return true;
}
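Note: with the identity weighting W used above (and Cn = I), the operator reduces to the Tikhonov-regularized pseudoinverse G = LT * inv( L * LT + alpha^2 * I ). A minimal self-contained sketch of that special case, assuming MatrixT is Eigen::MatrixXf (the typedef and toy dimensions are illustrative, not taken from the original project):

#include <cmath>
#include <iostream>
#include <Eigen/Dense>

typedef Eigen::MatrixXf MatrixT; // assumption: dense float matrix as in the example

int main()
{
    const float snr = 25;
    const MatrixT lf = MatrixT::Random( 4, 10 ); // toy leadfield: 4 sensors x 10 sources

    // With W = I and Cn = I: G = LT * inv( L * LT + alpha^2 * I )
    const MatrixT LLT = lf * lf.transpose();
    const float alpha = std::sqrt( LLT.trace() / ( snr * lf.rows() ) );
    const MatrixT g = lf.transpose()
        * ( LLT + alpha * alpha * MatrixT::Identity( lf.rows(), lf.rows() ) ).inverse();

    std::cout << "G: " << g.rows() << " x " << g.cols() << std::endl; // lf.cols() x lf.rows()
    return 0;
}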
Example 2: descriptions
static Eigen::VectorXf GetZeroMeanDescriptor
(
  const MatrixT & descriptions
)
{
  Eigen::VectorXf zero_mean_descriptor;
  if (descriptions.rows() == 0) {
    return zero_mean_descriptor;
  }
  // Compute the zero-mean descriptor: the column-wise mean of all descriptors.
  zero_mean_descriptor.setZero(descriptions.cols());
  const typename MatrixT::Index nbDescriptions = descriptions.rows();
  for (int i = 0; i < nbDescriptions; ++i)
  {
    for (int j = 0; j < descriptions.cols(); ++j)
      zero_mean_descriptor(j) += descriptions(i, j);
  }
  // Divide by a float (not double) to match the VectorXf scalar type.
  return zero_mean_descriptor / static_cast<float>(nbDescriptions);
}
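Note: the explicit accumulation loop is equivalent to Eigen's column-wise mean reduction. A minimal sketch, assuming MatrixT stores one float descriptor per row (the function name here is hypothetical):

#include <Eigen/Dense>

typedef Eigen::MatrixXf MatrixT; // assumption: one descriptor per row

static Eigen::VectorXf GetZeroMeanDescriptorColwise( const MatrixT & descriptions )
{
  if (descriptions.rows() == 0)
    return Eigen::VectorXf();
  // colwise().mean() averages each column over all rows, i.e., over all descriptors.
  return descriptions.colwise().mean().transpose();
}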
Example 3: candidate_hamming_distances
void Match_HashedDescriptions
(
  const HashedDescriptions& hashed_descriptions1,
  const MatrixT & descriptions1,
  const HashedDescriptions& hashed_descriptions2,
  const MatrixT & descriptions2,
  IndMatches * pvec_indices,
  std::vector<DistanceType> * pvec_distances,
  const int NN = 2
) const
{
  typedef L2_Vectorized<typename MatrixT::Scalar> MetricT;
  MetricT metric;

  static const int kNumTopCandidates = 10;

  // Preallocate the candidate descriptors container.
  std::vector<int> candidate_descriptors;
  candidate_descriptors.reserve(hashed_descriptions2.hashed_desc.size());

  // Preallocated Hamming distances. Each column indicates the Hamming distance
  // and the rows collect the descriptor ids with that distance.
  // num_descriptors_with_hamming_distance keeps track of how many descriptors
  // have that distance.
  Eigen::MatrixXi candidate_hamming_distances(
    hashed_descriptions2.hashed_desc.size(), nb_hash_code_ + 1);
  Eigen::VectorXi num_descriptors_with_hamming_distance(nb_hash_code_ + 1);

  // Preallocate the container for keeping Euclidean distances.
  std::vector<std::pair<DistanceType, int> > candidate_euclidean_distances;
  candidate_euclidean_distances.reserve(kNumTopCandidates);

  // A preallocated vector to determine if we have already used a particular
  // feature for matching (i.e., prevents duplicates).
  std::vector<bool> used_descriptor(hashed_descriptions2.hashed_desc.size());

  typedef matching::Hamming<stl::dynamic_bitset::BlockType> HammingMetricType;
  static const HammingMetricType metricH = {};

  for (int i = 0; i < hashed_descriptions1.hashed_desc.size(); ++i)
  {
    candidate_descriptors.clear();
    num_descriptors_with_hamming_distance.setZero();
    candidate_euclidean_distances.clear();

    const auto& hashed_desc = hashed_descriptions1.hashed_desc[i];
    // Accumulate all descriptors in each bucket group that are in the same
    // bucket id as the query descriptor.
    for (int j = 0; j < nb_bucket_groups_; ++j)
    {
      const uint16_t bucket_id = hashed_desc.bucket_ids[j];
      for (const auto& feature_id : hashed_descriptions2.buckets[j][bucket_id])
      {
        candidate_descriptors.push_back(feature_id);
        used_descriptor[feature_id] = false;
      }
    }

    // Skip matching this descriptor if there are not at least NN candidates.
    if (candidate_descriptors.size() <= NN)
    {
      continue;
    }

    // Compute the Hamming distance of all candidates based on the compact
    // (CompHash) hash code. Put the descriptors into buckets corresponding to
    // their Hamming distance.
    for (const int candidate_id : candidate_descriptors)
    {
      if (!used_descriptor[candidate_id]) // avoid selecting the same candidate multiple times
      {
        used_descriptor[candidate_id] = true;

        const HammingMetricType::ResultType hamming_distance = metricH(
          hashed_desc.hash_code.data(),
          hashed_descriptions2.hashed_desc[candidate_id].hash_code.data(),
          hashed_desc.hash_code.num_blocks());
        candidate_hamming_distances(
          num_descriptors_with_hamming_distance(hamming_distance)++,
          hamming_distance) = candidate_id;
      }
    }

    // Compute the Euclidean distance of the k descriptors with the best
    // Hamming distance.
    candidate_euclidean_distances.reserve(kNumTopCandidates);
    for (int j = 0; j < candidate_hamming_distances.cols() &&
      (candidate_euclidean_distances.size() < kNumTopCandidates); ++j)
    {
      for (int k = 0; k < num_descriptors_with_hamming_distance(j) &&
        (candidate_euclidean_distances.size() < kNumTopCandidates); ++k)
      {
        const int candidate_id = candidate_hamming_distances(k, j);
        const DistanceType distance = metric(
          descriptions2.row(candidate_id).data(),
          descriptions1.row(i).data(),
          descriptions1.cols());

        candidate_euclidean_distances.push_back(std::make_pair(distance, candidate_id));
      }
//......... (the rest of the code is omitted here) .........
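Note: bucketing candidates by Hamming distance as above is effectively a counting sort, so the top candidates can be visited in order of increasing distance without a full sort. A minimal self-contained sketch of the same idea, using std::bitset in place of the project's stl::dynamic_bitset and vectorized Hamming metric (all names here are illustrative):

#include <bitset>
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
  const std::bitset<8> query( "10110010" );
  const std::vector< std::bitset<8> > candidates = {
    std::bitset<8>( "10110011" ),
    std::bitset<8>( "01001101" ),
    std::bitset<8>( "10110010" )
  };

  // One bucket per possible Hamming distance (0..8), counting-sort style.
  std::vector< std::vector<int> > buckets( 8 + 1 );
  for (std::size_t id = 0; id < candidates.size(); ++id)
  {
    // XOR then popcount gives the Hamming distance between the hash codes.
    const std::size_t hamming_distance = ( query ^ candidates[id] ).count();
    buckets[hamming_distance].push_back( static_cast<int>( id ) );
  }

  // Visit candidates in order of increasing Hamming distance.
  for (std::size_t d = 0; d < buckets.size(); ++d)
    for (const int id : buckets[d])
      std::cout << "distance " << d << ": candidate " << id << "\n";
  return 0;
}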
Example 4: descriptor
HashedDescriptions CreateHashedDescriptions
(
  const MatrixT & descriptions,
  const Eigen::VectorXf & zero_mean_descriptor
) const
{
  // Steps:
  //   1) Compute hash code and hash buckets (based on the zero_mean_descriptor).
  //   2) Construct buckets.

  HashedDescriptions hashed_descriptions;
  if (descriptions.rows() == 0) {
    return hashed_descriptions;
  }

  // Create hash codes for each description.
  {
    // Allocate space for hash codes.
    const typename MatrixT::Index nbDescriptions = descriptions.rows();
    hashed_descriptions.hashed_desc.resize(nbDescriptions);

    Eigen::VectorXf descriptor(descriptions.cols());
    for (int i = 0; i < nbDescriptions; ++i)
    {
      // Allocate space for each bucket id.
      hashed_descriptions.hashed_desc[i].bucket_ids.resize(nb_bucket_groups_);

      for (int k = 0; k < descriptions.cols(); ++k)
      {
        descriptor(k) = descriptions(i, k);
      }
      descriptor -= zero_mean_descriptor;

      auto& hash_code = hashed_descriptions.hashed_desc[i].hash_code;
      hash_code = stl::dynamic_bitset(descriptions.cols());

      // Compute hash code.
      const Eigen::VectorXf primary_projection = primary_hash_projection_ * descriptor;
      for (int j = 0; j < nb_hash_code_; ++j)
      {
        hash_code[j] = primary_projection(j) > 0;
      }

      // Determine the bucket index for each group.
      Eigen::VectorXf secondary_projection;
      for (int j = 0; j < nb_bucket_groups_; ++j)
      {
        uint16_t bucket_id = 0;
        secondary_projection = secondary_hash_projection_[j] * descriptor;

        for (int k = 0; k < nb_bits_per_bucket_; ++k)
        {
          bucket_id = (bucket_id << 1) + (secondary_projection(k) > 0 ? 1 : 0);
        }
        hashed_descriptions.hashed_desc[i].bucket_ids[j] = bucket_id;
      }
    }
  }

  // Build the buckets.
  {
    hashed_descriptions.buckets.resize(nb_bucket_groups_);
    for (int i = 0; i < nb_bucket_groups_; ++i)
    {
      hashed_descriptions.buckets[i].resize(nb_buckets_per_group_);

      // Add the descriptor ID to the proper bucket group and id.
      for (int j = 0; j < hashed_descriptions.hashed_desc.size(); ++j)
      {
        const uint16_t bucket_id = hashed_descriptions.hashed_desc[j].bucket_ids[i];
        hashed_descriptions.buckets[i][bucket_id].push_back(j);
      }
    }
  }
  return hashed_descriptions;
}
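Note: the primary hash code above is sign-of-projection hashing; each bit records which side of a hyperplane the mean-centered descriptor falls on. A minimal self-contained sketch of that step (the names are hypothetical; also, Eigen's Random() draws uniformly from [-1, 1], whereas an LSH projection matrix like primary_hash_projection_ would typically be drawn from a normal distribution):

#include <iostream>
#include <vector>
#include <Eigen/Dense>

int main()
{
  const int dim = 128;           // e.g., SIFT descriptor dimension
  const int nb_hash_code = 128;  // one hyperplane per hash bit

  // Stand-in projection matrix and descriptor (uniform random for the sketch).
  const Eigen::MatrixXf primary_hash_projection =
    Eigen::MatrixXf::Random( nb_hash_code, dim );
  const Eigen::VectorXf descriptor = Eigen::VectorXf::Random( dim );

  const Eigen::VectorXf projection = primary_hash_projection * descriptor;

  // One bit per sign of the projection, as in CreateHashedDescriptions.
  std::vector<bool> hash_code( nb_hash_code );
  for (int j = 0; j < nb_hash_code; ++j)
    hash_code[j] = projection( j ) > 0;

  std::cout << "bit 0 = " << hash_code[0] << std::endl;
  return 0;
}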