This page collects typical usage examples of the C++ method cv::Mat::convertTo. If you are unsure how to call Mat::convertTo in C++, the curated code examples below may help. You can also explore further usage examples of the containing class, cv::Mat.
The following presents 15 code examples of the Mat::convertTo method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
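Before the examples, a quick reminder of the call itself may help. The sketch below is not taken from any of the examples on this page; it is a minimal, self-contained illustration of the signature dst = saturate_cast<rtype>(alpha * src + beta), using a tiny hand-built matrix purely as assumed sample data.

#include <opencv2/core.hpp>

int main() {
    // A small 8-bit matrix (values 0..255), used only as illustrative input.
    cv::Mat src = (cv::Mat_<uchar>(2, 2) << 0, 64, 128, 255);

    // convertTo(dst, rtype, alpha, beta) computes dst = saturate_cast<rtype>(alpha * src + beta).
    cv::Mat dst;
    src.convertTo(dst, CV_32F, 1.0 / 255.0, 0.0); // rescale 0..255 into a float image in [0, 1]
    return 0;
}

Most of the examples below follow the same pattern: pick a target depth (CV_8U, CV_32F, ...) and optionally a scale and offset, as in depth.convertTo(display, CV_8U, 255.0/4000) in Example 5.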
Example 1: translucentEdge
void CutoutImage::translucentEdge( const cv::Mat srcMat, const cv::Mat smoothMask, const cv::Mat liteMask, cv::Mat &dstMat ) // liteMask holds the mask data from before the edge was blurred
{
    cv::Mat smoothMask8uc1;
    smoothMask.convertTo(smoothMask8uc1, CV_8UC1, 255.0);
    int rows = smoothMask8uc1.rows;
    int cols = smoothMask8uc1.cols;
    //cv::threshold(smoothMask8uc1, bitSmoothMask, 1, 255, CV_THRESH_BINARY );
    cv::Mat srcMatClone = srcMat.clone();
    cv::Mat liteMaskClone = liteMask.clone();
    cv::Mat edgeSmoothMask = cv::Mat( smoothMask.size(), CV_8UC1, cv::Scalar(0));
    cv::cvtColor(srcMatClone, srcMatClone, CV_BGR2BGRA);
    for (int y = 0; y < rows; y++) {
        uchar *liteMaskCloneRowData = liteMaskClone.ptr<uchar>(y);
        uchar *smoothMask8uc1RowData = smoothMask8uc1.ptr<uchar>(y);
        uchar *srcMatCloneRowData = srcMatClone.ptr<uchar>(y);
        for (int x = 0; x < cols; x++) {
            //if(smoothMask8uc1RowData[x] != 0 && liteMaskCloneRowData[x] == 0)
            //if(smoothMask8uc1RowData[x] != 0 && smoothMask8uc1RowData[x] != 255)
            {
                // copy the smoothed mask value into the alpha channel of the BGRA clone
                srcMatCloneRowData[x*4 + 3] = smoothMask8uc1RowData[x];
            }
        }
    }
    dstMat = srcMatClone.clone();
    //cv::imwrite("cc.png", srcMat);
    //cv::imwrite("ccc.png", srcMatClone);
    //cv::waitKey(0);
}
Example 2: autocorrDFT
void AutoCorr::autocorrDFT(const cv::Mat &img, cv::Mat &dst)
{
    //Convert the image from unsigned char to a float matrix
    cv::Mat fImg;
    img.convertTo(fImg, CV_32FC1);
    //Subtract the mean
    cv::Mat mean(fImg.size(), fImg.type(), cv::mean(fImg));
    cv::subtract(fImg, mean, fImg);
    //Calculate the optimal size for the dft output. This increases speed.
    cv::Size dftSize;
    dftSize.width = cv::getOptimalDFTSize(2 * img.cols + 1);
    dftSize.height = cv::getOptimalDFTSize(2 * img.rows + 1);
    //Prepare the destination for the dft
    dst = cv::Mat(dftSize, CV_32FC1, cv::Scalar::all(0));
    //Transform the image into the frequency domain
    cv::dft(fImg, dst);
    //Multiply the spectrum by its complex conjugate (the fourth parameter is ignored here)
    cv::mulSpectrums(dst, dst, dst, cv::DFT_INVERSE, true);
    //Transform the result back to the image domain
    cv::dft(dst, dst, cv::DFT_INVERSE | cv::DFT_SCALE);
    //Normalize the result
    cv::multiply(fImg, fImg, fImg);
    float denom = cv::sum(fImg)[0];
    dst = dst * (1/denom);
}
Example 3: createArray
Array<float> CvMatToOpOutput::createArray(const cv::Mat& cvInputData, const double scaleInputToOutput,
                                          const Point<int>& outputResolution) const
{
    try
    {
        // Security checks
        if (cvInputData.empty())
            error("Wrong input element (empty cvInputData).", __LINE__, __FUNCTION__, __FILE__);
        if (cvInputData.channels() != 3)
            error("Input images must be 3-channel BGR.", __LINE__, __FUNCTION__, __FILE__);
        if (cvInputData.cols <= 0 || cvInputData.rows <= 0)
            error("Input images has 0 area.", __LINE__, __FUNCTION__, __FILE__);
        if (outputResolution.x <= 0 || outputResolution.y <= 0)
            error("Output resolution has 0 area.", __LINE__, __FUNCTION__, __FILE__);
        // outputData - rescale keeping the aspect ratio and convert the output image to float
        const cv::Mat frameWithOutputSize = resizeFixedAspectRatio(cvInputData, scaleInputToOutput,
                                                                   outputResolution);
        Array<float> outputData({outputResolution.y, outputResolution.x, 3});
        frameWithOutputSize.convertTo(outputData.getCvMat(), CV_32FC3);
        // Return result
        return outputData;
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        return Array<float>{};
    }
}
Example 4: niblackThreshold
void niblackThreshold( const cv::Mat& src, cv::Mat& dst, int windowSize, int c, float k)
{
    cv::Mat meanMat, std_dev, srcf;
    src.convertTo( srcf, CV_32FC1 );
#ifdef USE_OPENCV
    cv::Mat mean2;
    cv::Size window(windowSize, windowSize);
    cv::blur(srcf, meanMat, window);
    cv::blur(srcf.mul(srcf), mean2, window);
    cv::sqrt(mean2 - meanMat.mul(meanMat), std_dev);
#else
    meanWithDeviation(srcf, meanMat, std_dev, windowSize);
#endif
    dst = cv::Mat( src.size(), src.type() );
    for( int j = 0; j < dst.rows; ++j ) {
        for( int i = 0; i < dst.cols; ++i )
        {
            //pixel = ( pixel > mean + k * standard_deviation - c) ? object : background
            dst.at<uchar>(j,i) = ( srcf.at<float>(j,i) > meanMat.at<float>(j,i) + k * std_dev.at<float>(j,i) - c ) ? 255 : 0;
        }
    }
    return;
}
Example 5: show_depth
void show_depth(const cv::Mat& depth)
{
    cv::Mat display;
    //cv::normalize(depth, display, 0, 255, cv::NORM_MINMAX, CV_8U);
    depth.convertTo(display, CV_8U, 255.0/4000); // scale so that a depth value of 4000 maps to 255
    cv::imshow("Depth", display);
}
Example 6: setModel
int EMVisi2::setModel(const cv::Mat im1, const cv::Mat mask)
{
    if (proba.empty()) {
        dL = cv::Mat(im1.size(), CV_32FC1);
        ncc = cv::Mat(im1.size(), CV_32FC1);
        sum = cv::Mat(im1.size(), CV_32FC1);
        proba = cv::Mat(im1.size(), CV_32FC1);
        visi_proba = cv::Mat(im1.size(), CV_32FC1);
        nccproba_v = cv::Mat(im1.size(), CV_32FC1);
        nccproba_h = cv::Mat(im1.size(), CV_32FC1);
        ratio = cv::Mat(im1.size(), CV_32FC(im1.channels()));
        im1f = cv::Mat(im1.size(), CV_32FC(im1.channels()));
    }
    if (im1.channels() > 1) {
        cv::Mat gray;
        cv::cvtColor(im1, gray, cv::COLOR_RGB2GRAY);
        fncc.setModel(gray, mask);
    } else {
        fncc.setModel(im1, mask);
    }
    im1.convertTo(im1f, im1f.type());
    this->mask = mask;
    return 0;
}
Example 7: DrawHistogram
void DrawHistogram(const cv::Mat& histogram, cv::Mat& draw_img, int width)
{
    double minval, maxval;
    cv::minMaxLoc(histogram, &minval, &maxval);
    cv::Mat norm_hist;
    histogram.convertTo(norm_hist, CV_32FC1, (double)width / maxval);
    if(histogram.cols == 1){
        draw_img = cv::Mat::zeros(histogram.rows, width, CV_8UC1);
        for(int r=0; r<histogram.rows; r++){
            for(int c=0; c<norm_hist.at<float>(r,0); c++){
                draw_img.at<unsigned char>(r, c) = 255;
            }
        }
    }
    else if(histogram.rows == 1){
        draw_img = cv::Mat::zeros(width, histogram.cols, CV_8UC1);
        for(int c=0; c<histogram.cols; c++){
            for(int r=0; r<norm_hist.at<float>(0,c); r++){
                draw_img.at<unsigned char>(width - r - 1, c) = 255;
            }
        }
    }
}
Example 8: appendNodeFeatures
void drwnNNGraphImage::appendNodeFeatures(const drwnNNGraphImageData& image, const cv::Mat& features)
{
    DRWN_ASSERT(((int)image.width() == features.cols) && ((int)image.height() == features.rows));
    DRWN_ASSERT(image.numSegments() == this->numNodes());

    // convert to 32-bit floating point (if not already)
    if (features.depth() != CV_32F) {
        cv::Mat tmp(features.rows, features.cols, CV_8U);
        features.convertTo(tmp, CV_32F, 1.0, 0.0);
        return appendNodeFeatures(image, tmp);
    }

    // compute mean pixel feature over each superpixel
    vector<float> phi(image.numSegments(), 0.0f);
    for (unsigned y = 0; y < image.height(); y++) {
        for (unsigned x = 0; x < image.width(); x++) {
            const float p = features.at<float>(y, x);
            for (int c = 0; c < image.segments().channels(); c++) {
                const int segId = image.segments()[c].at<int>(y, x);
                if (segId < 0) continue;
                phi[segId] += p;
            }
        }
    }

    for (unsigned segId = 0; segId < phi.size(); segId++) {
        DRWN_ASSERT(isfinite(phi[segId]));
        VectorXf newFeatures(_nodes[segId].features.rows() + 1);
        newFeatures.head(_nodes[segId].features.rows()) = _nodes[segId].features;
        newFeatures[_nodes[segId].features.rows()] = phi[segId] / (float)image.segments().pixels(segId);
        _nodes[segId].features = newFeatures;
    }
}
Example 9: setRef
void ProcessDepth::setRef(cv::Mat image, bool improve)
{
    if(improve)
    {
        // replace every row of the image with that row's median value
        for(int imgY = 0; imgY < image.rows; imgY++)
        {
            cv::Mat lineOfImage = image.row(imgY);
            std::vector<u_int16_t> tempSort;
            // std::nth_element(line.begin<u_int16_t>, line.begin<u_int16_t> + line.cols/2, line.end<u_int16_t>());
            cv::MatIterator_<u_int16_t> it, end;
            for( it = lineOfImage.begin<u_int16_t>(), end = lineOfImage.end<u_int16_t>(); it != end; ++it)
            {
                tempSort.push_back(*it);
            }
            std::nth_element(tempSort.begin(), tempSort.begin() + tempSort.size()/2, tempSort.end());
            u_int16_t median = tempSort[tempSort.size()/2];
            cv::line( image, cv::Point2i(0,imgY), cv::Point2i(image.cols,imgY), median, 1, 8);
        }
        image.convertTo(mRefImage, CV_32FC1);
        // RefImage = image;
    }
    else
    {
        mRefImage = image;
    }
}
Example 10: update
void RunningBackground::update(cv::Mat frame, cv::Mat& thresholded) {
    if(needToReset || accumulator.empty()) {
        needToReset = false;
        frame.convertTo(accumulator, CV_32F);
    }
    accumulator.convertTo(background, CV_8U);
    switch(differenceMode) {
        case ABSDIFF: cv::absdiff(background, frame, foreground); break;
        case BRIGHTER: cv::subtract(frame, background, foreground); break;
        case DARKER: cv::subtract(background, frame, foreground); break;
    }
    ofxCv::copyGray(foreground, foregroundGray);
    int thresholdMode = ignoreForeground ? cv::THRESH_BINARY_INV : cv::THRESH_BINARY;
    cv::threshold(foregroundGray, thresholded, thresholdValue, 255, thresholdMode);
    float curLearningRate = learningRate;
    if(useLearningTime) {
        curLearningRate = 1. - powf(1. - (thresholdValue / 255.), 1. / learningTime);
    }
    if(ignoreForeground) {
        cv::accumulateWeighted(frame, accumulator, curLearningRate, thresholded);
        cv::bitwise_not(thresholded, thresholded);
    } else {
        cv::accumulateWeighted(frame, accumulator, curLearningRate);
    }
}
Example 11: sample
void DepthSampler::sample(cv::Mat source, std::vector<cv::Mat> layers, std::vector<cv::Point2i>& out){
    cv::Mat localSource;
    std::vector<cv::Mat> tempLayers;
    source.convertTo(localSource, CV_32FC3);
    cv::split(localSource, tempLayers);
    cv::Mat dM(localSource.rows, localSource.cols, CV_32FC1);
    dM = (tempLayers[1]*256) + tempLayers[2]; // recombine the depth value stored across two channels
    out.resize(0);
    int nLayers = layers.size();
    float localStep = minInd + (step*(double)nLayers);
    for ( std::vector<cv::Mat>::iterator it = layers.begin(); it != layers.end(); it++){
        for (int xIm = 0; xIm < source.cols; xIm += (int)localStep) {
            for (int yIm = 0; yIm < source.rows; yIm += (int)localStep) {
                if ((int)it->at<char>(yIm,xIm) != 0){
                    float d = dM.at<float>(yIm,xIm);
                    if ((d != 0) && (d < maxDistance)){
                        out.push_back(cv::Point2i(xIm,yIm));
                    }
                }
            }
        }
        localStep = localStep - step;
    }
}
Example 12: correctBritness
void ControllerImageFusion::correctBritness(cv::Mat& image, cv::Mat source)
{
    // use floating-point division so the integrality check below is meaningful
    float colRatioDb = (float)image.cols / source.cols;
    float rowRatioDb = (float)image.rows / source.rows;
    float val;
    if(std::modf(colRatioDb, &val) != 0 || std::modf(rowRatioDb, &val) != 0)
    {
        return;
    }
    int colRatio = (int)(colRatioDb);
    int rowRatio = (int)(rowRatioDb);
    if(colRatio <= 1 || rowRatio <= 1)
    {
        return;
    }
    cv::Mat sourceConverted;
    source.convertTo(sourceConverted, CV_32F);
    int rows = source.rows;
    int cols = source.cols;
    for(int i = 0; i < rows-1; i++)
    {
        const float* Mi = sourceConverted.ptr<float>(i);
        for(int j = 0; j < cols-1; j++)
        {
            cv::Mat temp = cv::Mat(image, cv::Rect(j*colRatio, i*rowRatio, colRatio, rowRatio));
            cv::Scalar mean = cv::mean(temp);
            mean.val[0] = Mi[j] - mean.val[0];
            cv::add(temp, mean, temp);
        }
    }
}
Example 13: task1
bool task1(const cv::Mat& image) {
    cv::Mat manual, buildIn, diff, tmp;
    std::vector<cv::Mat> channels;
    scaleImage(image, manual, 2, 100);
    image.convertTo(buildIn, -1, 2, 100);
    cv::absdiff(manual, buildIn, diff);
    cv::split(diff, channels);
    std::cout << "Max difference element: ";
    for (VMit it = channels.begin(); it != channels.end(); ++it) {
        int max = *(std::max_element((*it).begin<uchar>(), (*it).end<uchar>()));
        std::cout << max << " ";
    }
    std::cout << std::endl;
    std::vector<cv::Mat> scales(5, cv::Mat());
    std::vector<cv::Mat> dst(2, cv::Mat());
    for (int i = 0; i < task1c; ++i) {
        scaleImage(image, scales[i], task1v[2 * i], task1v[2 * i + 1]);
    }
    concatImages(scales[0], scales[1], dst[0]);
    concatChannels(dst[0], dst[0]);
    concatImages(scales[2], scales[3], dst[1]);
    concatImages(dst[1], scales[4], dst[1]);
    concatChannels(dst[1], dst[1]);
    return cv::imwrite(PATH + "Task1Lena01.jpg", dst[0]) && cv::imwrite(PATH + "Task1Lena345.jpg", dst[1]);
}
Example 14: performHighPass
void performHighPass(const cv::Mat& image, cv::Mat& res, int rad) {
    cv::Mat grey, tmp;
    cv::cvtColor(image, grey, CV_BGR2GRAY);
    grey.convertTo(grey, CV_32F);
    grey.copyTo(res);
    res.convertTo(res, CV_8U);
    std::vector<cv::Mat> planes(2, cv::Mat());
    std::vector<cv::Mat> polar(2, cv::Mat());
    cv::dft(grey, tmp, cv::DFT_COMPLEX_OUTPUT);
    cv::split(tmp, planes);
    cv::cartToPolar(planes[0], planes[1], polar[0], polar[1]);
    visualization(polar[0], tmp);
    concatImages(res, tmp, res);
    rearrangeQuadrants(polar[0]);
    highPassFilter(polar[0], rad);
    rearrangeQuadrants(polar[0]);
    visualization(polar[0], tmp);
    tmp.convertTo(tmp, res.type());
    concatImages(res, tmp, res);
    cv::polarToCart(polar[0], polar[1], planes[0], planes[1]);
    cv::merge(planes, tmp);
    cv::dft(tmp, tmp, cv::DFT_SCALE | cv::DFT_INVERSE | cv::DFT_REAL_OUTPUT);
    tmp.convertTo(tmp, CV_8U);
    concatImages(res, tmp, res);
}
Example 15: rescaleImageIntensity
cv::Mat rescaleImageIntensity(const cv::Mat& img, ScaleType type) {
    cv::Mat rval;
    if (type == ScaleNone) {
        if (img.depth() == CV_8U) {
            rval = img.clone();
        } else {
            double fmin, fmax;
            cv::minMaxLoc(img, &fmin, &fmax);
            if (fmax - fmin <= 1) {
                rval = cv::Mat(img.rows, img.cols, CV_8U);
                cv::convertScaleAbs(img, rval, 255);
            } else {
                img.convertTo(rval, CV_8U);
            }
        }
    } else {
        cv::Mat fsrc;
        if (img.depth() != at::REAL_IMAGE_TYPE) {
            img.convertTo(fsrc, at::REAL_IMAGE_TYPE);
        } else {
            fsrc = img;
        }
        cv::Mat tmp;
        if (type == ScaleMinMax) {
            double fmin, fmax;
            cv::minMaxLoc(fsrc, &fmin, &fmax);
            tmp = 255*((fsrc-fmin)/(fmax-fmin));
        } else {
            at::real fmag = cv::norm(fsrc, cv::NORM_INF);
            tmp = 127 + 127*fsrc/fmag;
        }
        tmp.convertTo(rval, CV_8U);
    }
    return rval;
}