This article collects typical usage examples of the C++ method cv::Mat::dot. If you have been wondering what Mat::dot does, how to call it, or what real code using it looks like, the curated examples below should help. You can also read further about the enclosing class, cv::Mat.
Below are 6 code examples of Mat::dot, sorted by popularity by default.
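Before the examples, a quick self-contained sketch of what Mat::dot computes: it treats both matrices as flat 1-D vectors (they must have the same size and type) and returns the sum of their element-wise products as a double; for multi-channel matrices the products over all channels are summed as well.

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Mat a = (cv::Mat_<double>(2, 2) << 1, 2, 3, 4);
    cv::Mat b = (cv::Mat_<double>(2, 2) << 5, 6, 7, 8);
    // Both matrices are flattened: 1*5 + 2*6 + 3*7 + 4*8 = 70
    std::cout << a.dot(b) << std::endl;   // prints 70
    return 0;
}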
Example 1: rayPlaneIntersection
cv::Point3f
rayPlaneIntersection(cv::Point2f uv, const cv::Mat& centroid, const cv::Mat& normal, const cv::Mat_<float>& Kinv)
{
    cv::Matx33d dKinv(Kinv);
    cv::Vec3d dNormal(normal);
    return rayPlaneIntersection(cv::Vec3d(uv.x, uv.y, 1), centroid.dot(normal), dNormal, dKinv);
}
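This wrapper only converts its arguments and delegates to an overload that is not part of this listing; centroid.dot(normal) is the plane offset d in the plane equation n·X = d. A sketch of how such an overload is commonly written, with an assumed signature rather than the author's actual one: the ray through pixel uv is X(t) = t·Kinv·(u, v, 1)^T with the camera at the origin, and intersecting it with n·X = d gives t = d / (n·Kinv·(u, v, 1)^T).

// Hypothetical overload matching the call above; a sketch of the
// standard ray/plane intersection, not the author's implementation.
cv::Point3f rayPlaneIntersection(const cv::Vec3d &uv1, double d,
                                 const cv::Vec3d &normal,
                                 const cv::Matx33d &Kinv)
{
    cv::Vec3d dir = Kinv * uv1;        // ray direction through the pixel
    double t = d / normal.dot(dir);    // ray parameter at the plane
    cv::Vec3d X = t * dir;
    return cv::Point3f(static_cast<float>(X[0]),
                       static_cast<float>(X[1]),
                       static_cast<float>(X[2]));
}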
Example 2: backProject
void ImageSegmentation::backProject(const cv::Mat &planeNormal, const double planeDistance, const cv::Point2d &pointImage, cv::Mat &pointWorld)
{
    pointWorld = cv::Mat(3, 1, CV_64F);
    pointWorld.at<double>(0) = pointImage.x;
    pointWorld.at<double>(1) = pointImage.y;
    pointWorld.at<double>(2) = 1;
    double t = -planeDistance / planeNormal.dot(pointWorld);
    pointWorld = pointWorld * t;
}
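Here the homogeneous image point (x, y, 1) is treated as a ray direction from the camera center, and t scales it onto the plane n·X + d = 0: from n·(t·p) + d = 0 it follows that t = -d / (n·p). A minimal usage sketch, assuming ImageSegmentation is default-constructible (the values are made up for illustration):

// Back-project image point (320, 240) onto the plane z = 1,
// i.e. n = (0, 0, 1) and d = -1 in the convention n.X + d = 0.
cv::Mat n = (cv::Mat_<double>(3, 1) << 0.0, 0.0, 1.0);
cv::Mat X;
ImageSegmentation seg;
seg.backProject(n, -1.0, cv::Point2d(320.0, 240.0), X);
// t = -(-1) / 1 = 1, so X = (320, 240, 1).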
Example 3: interpolate
/**
 * Interpolate D19 pixel values
 */
void CalibrationFilter::interpolate()
{
    // Gaussian interpolation mask
    float coeff[9] = {
        .707, 1.0, .707,
        1.0,  0.0, 1.0,
        .707, 1.0, .707 };
    const cv::Mat mask(3, 3, CV_32F, coeff);
    for(int i=1; i<mFrame32F_big.rows-1; ++i)
    {
        for(int j=1; j<mFrame32F_big.cols-1; ++j)
        {
            // Check if pixel is dead
            if(mAlive_big.at<unsigned char>(i, j) == 0)
            {
                // Dead pixel: build interpolation matrix
                cv::Mat neighbor( 3, 3, CV_32F );
                mAlive_big( cv::Range(i-1, i+2), cv::Range(j-1, j+2) ).convertTo( neighbor, CV_32F );
                //QLOG_DEBUG() << TAG << "neighbor sum" << cv::sum( neighbor )[0];
                // Compute interpolated value
                // 'M' is the same as 'mask' but with a 0 coefficient over dead pixels
                const cv::Mat M = mask.mul( neighbor, 1.0 );
                // 'roi' is the 3x3 region of interest centered on the dead pixel
                const cv::Mat roi = mFrame32F_big( cv::Range(i-1, i+2), cv::Range(j-1, j+2) );
                double sum = cv::sum( M )[0];
                if(sum >= 1.0)
                {
                    mFrame32F_big.at<float>(i, j) = M.dot( roi ) / sum;
                }
                else
                {
                    mFrame32F_big.at<float>(i, j) = 0;
                }
            }
        }
    }
}
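Because Mat::dot flattens its operands, M.dot(roi) above is exactly the weighted sum of the eight neighbors, and dividing by sum(M) renormalizes the weights when some neighbors are themselves dead. A tiny self-contained check of that arithmetic:

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    float w[9] = { .707f, 1.f, .707f,
                   1.f,   0.f, 1.f,
                   .707f, 1.f, .707f };
    cv::Mat mask(3, 3, CV_32F, w);
    cv::Mat roi = cv::Mat::ones(3, 3, CV_32F) * 2.f;   // all neighbors equal 2
    double sum = cv::sum(mask)[0];                     // 6.828
    // Weighted average: (2 * 6.828) / 6.828 = 2, as expected for a
    // uniform neighborhood.
    std::cout << mask.dot(roi) / sum << std::endl;     // prints 2
    return 0;
}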
Example 4: complete_message_callback
//......... part of the code is omitted here .........
// displaying the first solution if it was found
if (first_solution_found)
{
    std::cout << std::endl << "first R: " << first_R << std::endl;
    std::cout << "first T: " << first_T << std::endl;
    std::cout << "first n: " << first_n << std::endl;
    for (double ii : first_solution)
    {
        std::cout << ii << " ";
    }
    std::cout << std::endl;
}
// displaying the second solution if it was found
if (second_solution_found)
{
    std::cout << std::endl << "second R: " << second_R << std::endl;
    std::cout << "second T: " << second_T << std::endl;
    std::cout << "second n: " << second_n << std::endl;
    for (double ii : second_solution)
    {
        std::cout << ii << " ";
    }
    std::cout << std::endl;
}
// because the reference is set to the exact value when n should have only a z component, the correct
// choice is the one closest to n_ref = [0,0,1]^T, i.e. the one with the greatest dot product with n_ref
if (first_solution_found && second_solution_found)
{
    if (first_n.dot(n_ref) >= second_n.dot(n_ref))
    {
        R_fc = first_R;
        T_fc = first_T;
    }
    else
    {
        R_fc = second_R;
        T_fc = second_T;
    }
    fc_found = true;
}
else if(first_solution_found)
{
    R_fc = first_R;
    T_fc = first_T;
    fc_found = true;
}
// if a solution was found, publish it;
// it first needs converting to a pose message
if (fc_found)
{
    // converting the rotation from a cv matrix to a quaternion; first need it as a Matrix3x3
    R_fc_tf[0][0] = R_fc.at<double>(0,0);
    R_fc_tf[0][1] = R_fc.at<double>(0,1);
    R_fc_tf[0][2] = R_fc.at<double>(0,2);
    R_fc_tf[1][0] = R_fc.at<double>(1,0);
    R_fc_tf[1][1] = R_fc.at<double>(1,1);
    R_fc_tf[1][2] = R_fc.at<double>(1,2);
    R_fc_tf[2][0] = R_fc.at<double>(2,0);
    R_fc_tf[2][1] = R_fc.at<double>(2,1);
    R_fc_tf[2][2] = R_fc.at<double>(2,2);
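The listing breaks off here. For context, a hedged sketch of how such a callback typically finishes, assuming the ROS tf API and a hypothetical publisher pose_pub (none of this is from the original source): extract a quaternion from the rotation matrix and fill a geometry_msgs pose.

    // Sketch only; requires <tf/transform_datatypes.h> and
    // <geometry_msgs/Pose.h>. 'pose_pub' is a hypothetical publisher.
    tf::Quaternion q;
    R_fc_tf.getRotation(q);              // quaternion from the tf matrix

    geometry_msgs::Pose pose;
    pose.position.x = T_fc.at<double>(0);
    pose.position.y = T_fc.at<double>(1);
    pose.position.z = T_fc.at<double>(2);
    pose.orientation.x = q.getX();
    pose.orientation.y = q.getY();
    pose.orientation.z = q.getZ();
    pose.orientation.w = q.getW();
    pose_pub.publish(pose);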
Example 5: predictImpl
cv::Mat SGDStep::predictImpl(const bool debugMode,
                             const cv::Mat &input) const
{
    cv::Ptr<SGDConfig> config;
    try {
        config = config_cast<SGDConfig>(this->mConfig);
    } catch(const std::bad_cast &) {
        std::stringstream s;
        s << "Wrong config type: " << this->mConfig->identifier();
        throw MLError(s.str(), currentMethod, currentLine);
    }
    std::vector<std::string> classifiers = config->classifierFiles();
    if(debugMode) { debug(classifiers.size(), "classifier(s)"); }
    cv::Mat1d results(1, static_cast<int>(classifiers.size()));
    for(size_t idx = 0; idx < classifiers.size(); ++idx) {
        std::string classifierFile = classifiers[idx];
        if(debugMode) { debug("Loading classifier", classifierFile); }
        // Element 0 of each tuple holds the hyperplane weights and
        // element 1 the bias; the Platt tuple additionally carries the
        // two sigmoid coefficients in elements 3 and 4.
        std::tuple<cv::Mat1d, double, vl_size> classifierData;
        std::tuple<cv::Mat1d, double, vl_size, double, double> plattClassifierData;
        cv::Mat1d weights;
        double bias;
        if(config->plattScale()) {
            plattClassifierData = this->loadWithPlatt(classifierFile);
            weights = std::get<0>(plattClassifierData);
            bias = std::get<1>(plattClassifierData);
        } else {
            classifierData = this->load(classifierFile);
            weights = std::get<0>(classifierData);
            bias = std::get<1>(classifierData);
        }
        if(input.cols != weights.cols) {
            std::stringstream s;
            s << "Data doesn't fit trained model." << std::endl;
            throw MLError(s.str(), currentMethod, currentLine);
        }
        // Mat::dot requires equal types, so convert the sample if needed
        cv::Mat sample;
        if(input.type() == CV_64F) {
            sample = input;
        } else {
            input.convertTo(sample, CV_64F);
        }
        double score = sample.dot(weights) + bias;
        if(config->plattScale()) {
            results.at<double>(idx) = Platt::sigmoid_predict(score,
                                                             std::get<3>(plattClassifierData),
                                                             std::get<4>(plattClassifierData));
        } else {
            results.at<double>(idx) = score;
        }
    }
    if(config->binary()) {
        if(config->plattScale()) {
            // winner-takes-all on the Platt probabilities
            double min, max;
            cv::Point minIdx, maxIdx;
            cv::minMaxLoc(results, &min, &max, &minIdx, &maxIdx);
            int32_t best = maxIdx.x;
            results.setTo(0);
            results.at<double>(best) = 1;
        } else {
            results.setTo(1, results > 0);
            results.setTo(-1, results < 0);
        }
    }
    return results;
}
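Platt::sigmoid_predict is not shown in this listing; it is assumed to implement the standard Platt-scaling sigmoid p = 1 / (1 + exp(A·score + B)). A sketch in the numerically stable form used by LIBSVM:

#include <cmath>

// Evaluate 1 / (1 + exp(A*score + B)) without overflowing exp();
// A and B are the coefficients fitted during training.
double sigmoid_predict(double score, double A, double B)
{
    double fApB = score * A + B;
    if (fApB >= 0)
        return std::exp(-fApB) / (1.0 + std::exp(-fApB));
    else
        return 1.0 / (1.0 + std::exp(fApB));
}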
Example 6: isInFrustum
bool Frame::isInFrustum(MapPoint *pMP, float viewingCosLimit)
{
    pMP->mbTrackInView = false;

    // 3D in absolute coordinates
    cv::Mat P = pMP->GetWorldPos();

    // 3D in camera coordinates
    const cv::Mat Pc = mRcw*P+mtcw;
    const float PcX = Pc.at<float>(0);
    const float PcY = Pc.at<float>(1);
    const float PcZ = Pc.at<float>(2);

    // Check positive depth
    if(PcZ<0.0)
        return false;

    // Project in image and check it is not outside
    const float invz = 1.0/PcZ;
    const float u = fx*PcX*invz+cx;
    const float v = fy*PcY*invz+cy;
    if(u<mnMinX || u>mnMaxX)
        return false;
    if(v<mnMinY || v>mnMaxY)
        return false;

    // Check distance is in the scale invariance region of the MapPoint
    const float maxDistance = pMP->GetMaxDistanceInvariance();
    const float minDistance = pMP->GetMinDistanceInvariance();
    const cv::Mat PO = P-mOw;
    const float dist = cv::norm(PO);
    if(dist<minDistance || dist>maxDistance)
        return false;

    // Check viewing angle
    cv::Mat Pn = pMP->GetNormal();
    float viewCos = PO.dot(Pn)/dist;
    if(viewCos<viewingCosLimit)
        return false;

    // Predict scale level according to the distance
    float ratio = dist/minDistance;
    vector<float>::iterator it = lower_bound(mvScaleFactors.begin(), mvScaleFactors.end(), ratio);
    int nPredictedLevel = it-mvScaleFactors.begin();
    if(nPredictedLevel>=mnScaleLevels)
        nPredictedLevel = mnScaleLevels-1;

    // Data used by the tracking
    pMP->mbTrackInView = true;
    pMP->mTrackProjX = u;
    pMP->mTrackProjY = v;
    pMP->mnTrackScaleLevel = nPredictedLevel;
    pMP->mTrackViewCos = viewCos;
    return true;
}
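The viewing-angle test above is just the cosine between the camera-to-point vector and the point's mean viewing direction: cos(theta) = (PO·Pn) / (|PO|·|Pn|), where |Pn| = 1 by construction in ORB-SLAM, so only the division by dist is needed. A minimal self-contained check:

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Mat PO = (cv::Mat_<float>(3, 1) << 0.f, 0.f, 2.f);  // camera -> point
    cv::Mat Pn = (cv::Mat_<float>(3, 1) << 0.f, 0.f, 1.f);  // unit normal
    float viewCos = PO.dot(Pn) / cv::norm(PO);
    std::cout << viewCos << std::endl;  // 1: viewed exactly along the normal
    return 0;
}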