This page collects typical usage examples of the C++ method Eigen::VectorXf::data. If you are wondering what VectorXf::data does, how to call it, and what real-world usage looks like, the curated examples below may help. You can also explore further usage examples of the containing class, Eigen::VectorXf.
The following shows 5 code examples of the VectorXf::data method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
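Before the extracted examples, a minimal self-contained sketch may help orient: VectorXf::data() returns a raw float* into the vector's contiguous storage (no copy is made), which is the usual way to hand Eigen data to C-style APIs. The function fill_with_ones below is a hypothetical stand-in for such an API, not part of any example.

#include <Eigen/Dense>
#include <cstdio>

// Hypothetical C-style API that expects a raw float buffer.
static void fill_with_ones(float* buf, int n) {
  for (int i = 0; i < n; ++i) buf[i] = 1.0f;
}

int main() {
  Eigen::VectorXf v = Eigen::VectorXf::Zero(4);
  // data() exposes the vector's contiguous storage; writes through
  // the pointer are visible in v itself.
  fill_with_ones(v.data(), static_cast<int>(v.size()));
  std::printf("sum = %f\n", v.sum()); // prints sum = 4.000000
  return 0;
}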
Example 1: if
float mmf::OptSO3MMFvMF::computeAssignment(uint32_t& N)
{
  N = this->cld_.counts().sum();
  // Compute log of the cluster weights and push them to GPU
  Eigen::VectorXf pi = Eigen::VectorXf::Ones(K()*6)*1000;
  std::cout << this->t_ << std::endl;
  std::cout << "counts: " << this->cld_.counts().transpose() << std::endl;
  if (this->t_ == 0) {
    pi.fill(1.);
  } else if (this->t_ > 24) {
    std::cout << "truncating noisy MFs: " << std::endl;
    for (uint32_t k=0; k<K()*6; ++k) {
      float count = this->cld_.counts().middleRows((k/6)*6, 6).sum();
      pi(k) = count > 0.10*N ? count : 1.e-20;
      std::cout << count << " < " << 0.1*N << std::endl;
    }
  } else {
    // estimate the axis and MF proportions
    // pi += this->cld_.counts();
    // estimate only the MF proportions
    for (uint32_t k=0; k<K()*6; ++k)
      pi(k) += this->cld_.counts().middleRows((k/6)*6, 6).sum();
    if (estimateTau_) {
      for (uint32_t k=0; k<K()*6; ++k)
        if (this->cld_.counts()(k) == 0) {
          taus_(k) = 0.; // uniform
        } else {
          // Mean direction of vMF component k: one of the six signed
          // coordinate axes, rotated by the MF rotation Rs_[k/6].
          Eigen::Vector3f mu = Eigen::Vector3f::Zero();
          mu((k%6)/2) = (k%6)%2 == 0 ? -1. : 1.;
          mu = Rs_[k/6]*mu;
          taus_(k) = jsc::vMF<3>::MLEstimateTau(
              this->cld_.xSums().col(k).cast<double>(),
              mu.cast<double>(), this->cld_.counts()(k));
        }
    } else {
      taus_.fill(60.);
    }
  }
  std::cout << "pi: " << pi.transpose() << std::endl;
  pi = (pi.array() / pi.sum()).array().log();
  std::cout << "pi: " << pi.transpose() << std::endl;
  if (estimateTau_) {
    std::cout << pi.transpose() << std::endl;
    std::cout << taus_.transpose() << std::endl;
    for (uint32_t k=0; k<K()*6; ++k) {
      pi(k) -= jsc::vMF<3>::log2SinhOverZ(taus_(k)) - log(2.*M_PI);
    }
  }
  pi_.set(pi);
  Rot2Device();
  Eigen::VectorXf residuals = Eigen::VectorXf::Zero(K()*6);
  // Hand the raw residual buffer to the GPU wrapper via data().
  MMFvMFCostFctAssignmentGPU((float*)residuals.data(), d_cost, &N,
      d_N_, cld_.d_x(), d_weights_, cld_.d_z(), d_mu_, pi_.data(),
      cld_.N(), K());
  return residuals.sum();
}
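The VectorXf::data() usage to note here is the output-buffer pattern: a zero-initialized residual vector is allocated on the host, and its raw buffer is handed to the GPU wrapper to be filled. The (float*) cast in the call is redundant, since data() on a non-const VectorXf already returns float*. A minimal sketch of the pattern, with compute_into as a hypothetical stand-in for the CUDA wrapper:

#include <Eigen/Dense>

// Hypothetical stand-in for a routine (e.g. a CUDA wrapper) that
// writes one residual per component into a caller-provided buffer.
static void compute_into(float* out, int n) {
  for (int i = 0; i < n; ++i) out[i] = 0.5f * i;
}

float sum_residuals(int K) {
  Eigen::VectorXf residuals = Eigen::VectorXf::Zero(K * 6);
  compute_into(residuals.data(), static_cast<int>(residuals.size()));
  return residuals.sum(); // Eigen sees the values written via data()
}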
Example 2: computeCorrelation
void FeatureEval::computeCorrelation(float* data, int vector_size, int size,
                                     std::vector<int>& big_to_small,
                                     int stride, int offset) {
  stride = stride ? stride : vector_size;
  if (ll_->getSelection().size() == 0)
    return;
  // Collect the labels of all selected layers
  std::set<uint16_t> labels;
  for (boost::weak_ptr<Layer> wlayer : ll_->getSelection()) {
    for (uint16_t label : wlayer.lock()->getLabelSet())
      labels.insert(label);
  }
  std::vector<float> layer = get_scaled_layer_mask(cl_->active_->labels_,
                                                   labels,
                                                   big_to_small,
                                                   size);
  Eigen::MatrixXf correlation_mat =
      multi_correlate(layer, data, vector_size, size, stride, offset);
  Eigen::MatrixXf Rxx = correlation_mat.topLeftCorner(vector_size, vector_size);
  Eigen::VectorXf c = correlation_mat.block(0, vector_size, vector_size, 1);
  //std::cout << correlation_mat << std::endl;
  // Coefficient of determination: R^2 = c^T * Rxx^{-1} * c
  float R = c.transpose() * (Rxx.inverse() * c);
  qDebug() << "R^2: " << R;
  //qDebug() << "R: " << sqrt(R);
  reportResult(R, c.data(), vector_size);
  //Eigen::VectorXf tmp = (Rxx.inverse() * c);
  qDebug() << "Y -> X correlation <<<<<<<<<<<<<";
  std::cout << c << std::endl;
  //qDebug() << "Coefs <<<<<<<<<<<<<";
  //std::cout << tmp << std::endl;
}
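A note on the linear algebra: the value printed as "R^2" is the coefficient of determination, R^2 = c^T * Rxx^{-1} * c, where c is the cross-correlation between the features and the target layer. The example forms Rxx.inverse() explicitly; for a symmetric correlation matrix, a factorization-based solve is the more numerically robust alternative. A minimal sketch of that variant (our rewrite, not the project's code):

#include <Eigen/Dense>

// Given the (p+1)x(p+1) joint correlation matrix with the target
// variable in the last row/column (as in the example), compute
// R^2 = c^T * Rxx^{-1} * c without forming the explicit inverse.
float r_squared(const Eigen::MatrixXf& correlation_mat, int p) {
  Eigen::MatrixXf Rxx = correlation_mat.topLeftCorner(p, p);
  Eigen::VectorXf c = correlation_mat.block(0, p, p, 1);
  // An LDL^T solve is preferable to Rxx.inverse() for a symmetric
  // (positive semi-definite) correlation matrix.
  return c.transpose() * Rxx.ldlt().solve(c);
}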
Example 3: SplineLossyCompress
void TraceStoreCol::SplineLossyCompress(const std::string &strategy, int order,
    int flow_ix, char *bad_wells, float *mad, size_t num_rows, size_t num_cols,
    size_t num_frames, size_t num_flows, int use_mesh_neighbors,
    size_t frame_stride, size_t flow_frame_stride, int16_t *data) {
  Eigen::MatrixXf Basis;
  vector<float> knots;
  FillInKnots(strategy, num_frames, knots);
  if (!knots.empty()) {
    int boundaries[2];
    boundaries[0] = 0;
    boundaries[1] = num_frames;
    basis_splines_endreps_local_v2(&knots[0], knots.size(), order, boundaries,
        sizeof(boundaries)/sizeof(boundaries[0]), Basis);
  }
  // Least-squares projector onto the spline basis: SX = (B^T B)^{-1} B^T
  Eigen::MatrixXf SX = (Basis.transpose() * Basis).inverse() * Basis.transpose();
  Eigen::MatrixXf Y(frame_stride, num_frames);
  // Eigen::MatrixXf FlowMeans(num_flows, num_frames);
  // FlowMeans.setZero();
  int good_wells = 0;
  char *bad_start = bad_wells;
  char *bad_end = bad_start + frame_stride;
  while (bad_start != bad_end) {
    if (*bad_start++ == 0) {
      good_wells++;
    }
  }
  // if nothing good then skip it
  if (good_wells < MIN_SAMPLE_WELL) {
    return;
  }
  ChipReduction smoothed_avg;
  int x_clip = num_cols;
  int y_clip = num_rows;
  if (use_mesh_neighbors == 0) {
    x_clip = THUMBNAIL_SIZE;
    y_clip = THUMBNAIL_SIZE;
  }
  smoothed_avg.Init(num_rows, num_cols, num_frames,
                    SMOOTH_REDUCE_STEP, SMOOTH_REDUCE_STEP,
                    y_clip, x_clip,
                    SMOOTH_REDUCE_STEP * SMOOTH_REDUCE_STEP * .4);
  for (size_t frame_ix = 0; frame_ix < num_frames; frame_ix++) {
    smoothed_avg.ReduceFrame(data + flow_ix * frame_stride + frame_ix * flow_frame_stride,
                             bad_wells, frame_ix);
  }
  smoothed_avg.SmoothBlocks(SMOOTH_REDUCE_REGION, SMOOTH_REDUCE_REGION);
  // Copy the int16 traces for this flow into the float matrix Y,
  // one frame per column, through the raw pointer from Y.data()
  for (size_t frame_ix = 0; frame_ix < num_frames; frame_ix++) {
    float *y_start = Y.data() + frame_ix * frame_stride;
    float *y_end = y_start + frame_stride;
    int16_t *trace_start = data + flow_ix * frame_stride + frame_ix * flow_frame_stride;
    while (y_start != y_end) {
      *y_start++ = *trace_start++;
    }
  }
  // subtract off flow,frame avg
  for (size_t frame_ix = 0; frame_ix < num_frames; frame_ix++) {
    float *start = Y.data() + frame_stride * frame_ix;
    float *end = start + frame_stride;
    for (size_t row = 0; row < num_rows; row++) {
      for (size_t col = 0; col < num_cols; col++) {
        float avg = smoothed_avg.GetSmoothEst(row, col, frame_ix);
        *start++ -= avg;
      }
    }
  }
  // Get coefficients to solve
  Eigen::MatrixXf B = Y * SX.transpose();
  // Uncompress data into yhat matrix
  Eigen::MatrixXf Yhat = B * Basis.transpose();
  // add the flow/frame averages back
  for (size_t frame_ix = 0; frame_ix < num_frames; frame_ix++) {
    float *start = Y.data() + frame_stride * frame_ix;
    float *end = start + frame_stride;
    float *hstart = Yhat.data() + frame_stride * frame_ix;
    for (size_t row = 0; row < num_rows; row++) {
      for (size_t col = 0; col < num_cols; col++) {
        float avg = smoothed_avg.GetSmoothEst(row, col, frame_ix);
        *start++ += avg;
        *hstart++ += avg;
      }
    }
  }
  // Write the reconstructed (lossy) traces back into the int16 buffer
  for (size_t frame_ix = 0; frame_ix < num_frames; frame_ix++) {
    float *yhat_start = Yhat.data() + frame_ix * frame_stride;
    float *yhat_end = yhat_start + frame_stride;
    int16_t *trace_start = data + flow_ix * frame_stride + frame_ix * flow_frame_stride;
    while (yhat_start != yhat_end) {
      *trace_start++ = (int)(*yhat_start + .5f);
      yhat_start++;
    }
  }
//......... some code omitted here .........
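The compression core above is plain least squares: SX = (Basis^T Basis)^{-1} Basis^T is the projector onto the spline basis, B = Y * SX^T holds the compressed coefficients, and Yhat = B * Basis^T reconstructs the traces. A minimal sketch of that round trip, isolated from the chip-specific averaging (our condensation, not the project's code):

#include <Eigen/Dense>

// Round-trip a matrix of traces (one trace per row, one frame per
// column) through a spline basis of shape frames x nbasis.
Eigen::MatrixXf lossy_roundtrip(const Eigen::MatrixXf& Y,
                                const Eigen::MatrixXf& Basis) {
  // Least-squares projector, as in the example above.
  Eigen::MatrixXf SX =
      (Basis.transpose() * Basis).inverse() * Basis.transpose();
  Eigen::MatrixXf B = Y * SX.transpose();  // compressed coefficients
  return B * Basis.transpose();            // reconstructed traces Yhat
}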
Example 4: apply
void apply(Ea& ea)
{
  // boost::accumulators::accumulator_set<double, boost::accumulators::stats<boost::accumulators::tag::max> > acc_max;
  // boost::accumulators::accumulator_set<double, boost::accumulators::stats<boost::accumulators::tag::min> > acc_min;
  SFERES_CONST size_t k = Params::novelty::k;
  // merge the population and the archive in a single archive
  pop_t archive = _archive;
  archive.insert(archive.end(), ea.pop().begin(), ea.pop().end());
  // we compute all the distances from pop(i) to archive(j) and store them
  Eigen::MatrixXf distances(ea.pop().size(), archive.size());
  novelty::_distance_f<Phen> f(ea.pop(), archive, distances);
  parallel::init();
  parallel::p_for(parallel::range_t(0, ea.pop().size()), f);
  // compute the sparseness of each individual of the population
  // and potentially add it to the archive
  int added = 0;
  for (size_t i = 0; i < ea.pop().size(); ++i) {
    size_t nb_objs = ea.pop()[i]->fit().objs().size();
    Eigen::VectorXf vd = distances.row(i);
    double n = 0.0;
    // Partially sort the raw buffer exposed by data() so that the
    // k nearest neighbors come first
    std::partial_sort(vd.data(),
                      vd.data() + k,
                      vd.data() + vd.size());
    // The novelty score is the mean distance to the k nearest neighbors
    n = vd.head<k>().sum() / k;
    // Set the novelty score as the last objective in the array of objectives
    ea.pop()[i]->fit().set_obj(nb_objs - 1, n);
    // std::cout << "rho = " << _rho_min << "; n = " << n << "\n";
    // acc_max(n);
    // acc_min(n);
    // Add to the archive if this individual is not in it already and its
    // novelty passes the threshold (or by random chance)
    if (!in_archive_already(ea.pop()[i]->id()) &&
        (n > _rho_min
         || misc::rand<float>() < Params::novelty::add_to_archive_prob))
    {
      // Add this individual to the permanent archive
      _archive.push_back(ea.pop()[i]);
      // std::cout << "----> n = " << n << "; label = " << ea.pop()[i]->fit().label() << "; score = " << ea.pop()[i]->fit().score() << "; id = " << ea.pop()[i]->id() <<"\n";
      // std::cout << "----> n = " << n << "; label = " << "; id = " << ea.pop()[i]->id() <<"\n";
      _not_added = 0;
      ++added;
    } else {
      // Do not add this individual to the archive
      ++_not_added;
    }
  } // end for all individuals
  // update rho_min
  if (_not_added > Params::novelty::stalled_tresh) // 2500
  {
    _rho_min *= 0.95; // lower rho_min
    _not_added = 0;   // reset the stall counter after lowering rho_min
  }
  if (_archive.size() > Params::novelty::k && added > Params::novelty::adding_tresh) // 4
  {
    _rho_min *= 1.05f; // increase rho_min
  }
  std::cout << "a size:" << _archive.size() << std::endl;
  // DEBUG
  // std::cout << "a size:" << _archive.size()
  //           << "; max: " << boost::accumulators::max(acc_max)
  //           << "; min: " << boost::accumulators::min(acc_min)
  //           << std::endl;
}
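The VectorXf::data() trick here is worth isolating: because a VectorXf stores its coefficients contiguously, its raw buffer can be handed straight to std::partial_sort so that the k smallest distances land at the front. A minimal sketch of the sparseness computation, using a runtime head(k) in place of the compile-time head<k>():

#include <Eigen/Dense>
#include <algorithm>

// Mean distance to the k nearest neighbors, given one row of the
// distance matrix; vd is taken by value because partial_sort mutates it.
float sparseness(Eigen::VectorXf vd, int k) {
  std::partial_sort(vd.data(), vd.data() + k, vd.data() + vd.size());
  return vd.head(k).sum() / k;
}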
Example 5: run
//......... some code omitted here .........
    scale[((int)feat.y)*img.cols + (int)feat.x] = true;
  }
  // Adjust image values
  cv::Mat scaledImg = img.clone();
  scaledImg += 1;
  scaledImg *= (1 / 32.0f);
  // For each pixel in the image
  for (r = 0; r < scaledImg.rows; ++r)
  {
    min_row = (size_t)std::max(int(r - 1), 0);
    max_row = (size_t)std::min(scaledImg.rows - 1, int(r + 1));
    for (c = 0; c < scaledImg.cols; ++c)
    {
      // Increment pixel index
      ++index;
      // If this is not a feature point
      if (!scale[index])
      {
        min_col = (size_t)std::max(int(c - 1), 0);
        max_col = (size_t)std::min(scaledImg.cols - 1, int(c + 1));
        neighborCount = 0;
        // Loop over the 3x3 neighborhood and collect the intensities
        // (getStat below computes their mean and variance)
        for (nr = min_row; nr <= max_row; ++nr)
        {
          for (nc = min_col; nc <= max_col; ++nc)
          {
            if (nr == r && nc == c) continue;
            weights[neighborCount++] = scaledImg.at<float>(nr, nc);
          }
        }
        weights[neighborCount] = scaledImg.at<float>(r, c);
        // Calculate the weight statistics
        getStat(weights, neighborCount + 1, m, v);
        m *= 0.6;
        if (v < EPS) v = EPS; // Avoid division by 0
        // Apply weight function
        mWeightFunc(weights, neighborCount, scaledImg.at<float>(r, c), m, v);
        // Normalize the weights and set the coefficients
        sum = std::accumulate(weights.begin(), weights.begin() + neighborCount, 0.0f);
        i = 0;
        for (nr = min_row; nr <= max_row; ++nr)
        {
          for (nc = min_col; nc <= max_col; ++nc)
          {
            if (nr == r && nc == c) continue;
            nindex = nr*scaledImg.cols + nc;
            coefficients.push_back(Eigen::Triplet<float>(
                index, nindex, -weights[i++] / sum));
          }
        }
      }
      // Add center coefficient
      coefficients.push_back(Eigen::Triplet<float>(index, index, 1));
    }
  }
  // Build right side equation vector
  Eigen::VectorXf b = Eigen::VectorXf::Zero(pixels);
  for (i = 0; i < features.size(); ++i)
  {
    const FeatureType& feat = features[i];
    b[((int)feat.y)*scaledImg.cols + (int)feat.x] = feat.scale;
  }
  // Build left side equation matrix
  Eigen::SparseMatrix<float> A(pixels, pixels);
  A.setFromTriplets(coefficients.begin(), coefficients.end());
  /*/// Debug ///
  std::ofstream file("Output.m");
  cv::Mat_<int> rows(1, coefficients.size()), cols(1, coefficients.size());
  cv::Mat_<float> values(1, coefficients.size());
  for (i = 0; i < coefficients.size(); ++i)
  {
    rows.at<int>(i) = coefficients[i].row();
    cols.at<int>(i) = coefficients[i].col();
    values.at<float>(i) = coefficients[i].value();
  }
  file << "cpp_rows = " << rows << ";" << std::endl;
  file << "cpp_cols = " << cols << ";" << std::endl;
  file << "cpp_values = " << values << ";" << std::endl;
  /////////////*/
  // Solve the sparse linear system A x = b
  Eigen::SparseLU<Eigen::SparseMatrix<float>> slu(A);
  Eigen::VectorXf x = slu.solve(b);
  // Copy the solution to the output via the raw buffers from data()
  scaleMap.resize(pixels);
  memcpy(scaleMap.data(), x.data(), pixels*sizeof(float));
}
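The final memcpy relies on both buffers being contiguous float storage. An equivalent, more Eigen-idiomatic form maps the destination's storage and assigns into it, which keeps the size bookkeeping in one place. A sketch, assuming scaleMap is a std::vector<float> as the resize/memcpy pair suggests:

#include <Eigen/Dense>
#include <vector>

// Copy a solved VectorXf into a std::vector<float> without memcpy.
void copy_out(const Eigen::VectorXf& x, std::vector<float>& scaleMap) {
  scaleMap.resize(x.size());
  // Map wraps the vector's buffer as an Eigen view; assignment copies.
  Eigen::Map<Eigen::VectorXf>(scaleMap.data(), x.size()) = x;
}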