This article collects typical usage code examples of the C++ cv::Mat::size method. If you are wondering how exactly to use C++ Mat::size, how to call it, or what a usage example looks like, the curated method code examples below may help. You can also explore further usage examples of the containing class, cv::Mat.
The following presents 15 code examples of the Mat::size method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
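As a quick reference before the project-level examples, here is a minimal, self-contained sketch (assuming nothing beyond the core OpenCV API) of what cv::Mat::size returns and how it is commonly used to allocate a second matrix with the same dimensions. Note that the returned cv::Size stores width (columns) and height (rows), reversing the (rows, cols) order used by the Mat constructor, which is a common source of confusion:

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    cv::Mat img(480, 640, CV_8UC3);                       // 480 rows x 640 columns
    cv::Size sz = img.size();                             // size() returns cv::Size(width, height)
    std::cout << sz.width << " x " << sz.height << "\n";  // prints "640 x 480"
    cv::Mat mask = cv::Mat::zeros(img.size(), CV_8UC1);   // same dimensions, different type
    return 0;
}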
Example 1: process
void PBASFrameProcessor::process(cv::Mat &frame, cv::Mat &output)
{
const int medFilterSize = 7;
double meanGradMagn;
std::vector<AmbiguousCandidate> candidates;
cv::Mat subSamplingOffset = cv::Mat::zeros(frame.size(), CV_32F);
if (m_iteration == 0)
{
m_lastResult = cv::Mat::zeros(frame.size(), CV_8U);
m_lastResultPP = cv::Mat::zeros(frame.size(), CV_8U);
m_noiseMap = cv::Mat::zeros(frame.size(), CV_32F);
m_gradMagnMap.create(frame.size(), CV_8U);
m_motionDetector.initialize(frame);
}
if (m_iteration != 0)
m_motionDetector.updateMotionMap(frame);
//updateGradMagnMap(frame);
std::vector<PBASFeature> featureMap = PBASFeature::calcFeatureMap(frame);
//meanGradMagn = PBASFeature::calcMeanGradMagn(featureMap, frame.rows, frame.cols);
//PBASFeature::setColorWeight(0.8 - meanGradMagn / 255);
m_pbas.process(featureMap, frame.rows, frame.cols, m_currentResult, m_motionDetector.getMotionMap());
if (m_pbas.isInitialized())
{
m_motionDetector.updateCandidates(m_currentResultPP);
m_motionDetector.classifyStaticCandidates(frame, m_pbas.drawBgSample(), m_currentResultPP);
candidates = m_motionDetector.getStaticObjects();
for (int k = 0; k < candidates.size(); ++k)
{
if (candidates[k].state == DETECTOR_GHOST_OBJECT)
{
subSamplingOffset(candidates[k].boundingBox) -= 100;
}
else if ((candidates[k].state == DETECTOR_STATIC_OBJECT))
{
subSamplingOffset(candidates[k].boundingBox) += 50;
}
}
m_pbas.subSamplingOffset(subSamplingOffset);
#ifndef _DATASET
MotionDetector::drawBoundingBoxesClassified(frame, m_outputWithBb, candidates);
#endif // !_DATASET
}
// normalize gradient magnmap
//parallelBackgroundAveraging(&rgbChannels, false, &m_currentResult);
//###############################################
//POST-PROCESSING HERE
//for the final results in the changedetection-challenge a 9x9 median filter has been applied
cv::medianBlur(m_currentResult, m_currentResultPP, medFilterSize);
//###############################################
m_currentResultPP.copyTo(output);
updateNoiseMap();
m_currentResult.copyTo(m_lastResult);
++m_iteration;
}
Example 2: RGBDOdometry
bool cv::RGBDOdometry( cv::Mat& Rt, const Mat& initRt,
const cv::Mat& image0, const cv::Mat& _depth0, const cv::Mat& validMask0,
const cv::Mat& image1, const cv::Mat& _depth1, const cv::Mat& validMask1,
const cv::Mat& cameraMatrix, float minDepth, float maxDepth, float maxDepthDiff,
const std::vector<int>& iterCounts, const std::vector<float>& minGradientMagnitudes,
int transformType )
{
const int sobelSize = 3;
const double sobelScale = 1./8;
Mat depth0 = _depth0.clone(),
depth1 = _depth1.clone();
// check RGB-D input data
CV_Assert( !image0.empty() );
CV_Assert( image0.type() == CV_8UC1 );
CV_Assert( depth0.type() == CV_32FC1 && depth0.size() == image0.size() );
CV_Assert( image1.size() == image0.size() );
CV_Assert( image1.type() == CV_8UC1 );
CV_Assert( depth1.type() == CV_32FC1 && depth1.size() == image0.size() );
// check masks
CV_Assert( validMask0.empty() || (validMask0.type() == CV_8UC1 && validMask0.size() == image0.size()) );
CV_Assert( validMask1.empty() || (validMask1.type() == CV_8UC1 && validMask1.size() == image0.size()) );
// check camera params
CV_Assert( cameraMatrix.type() == CV_32FC1 && cameraMatrix.size() == Size(3,3) );
// other checks
CV_Assert( iterCounts.empty() || minGradientMagnitudes.empty() ||
minGradientMagnitudes.size() == iterCounts.size() );
CV_Assert( initRt.empty() || (initRt.type()==CV_64FC1 && initRt.size()==Size(4,4) ) );
std::vector<int> defaultIterCounts;
std::vector<float> defaultMinGradMagnitudes;
std::vector<int> const* iterCountsPtr = &iterCounts;
std::vector<float> const* minGradientMagnitudesPtr = &minGradientMagnitudes;
if( iterCounts.empty() || minGradientMagnitudes.empty() )
{
defaultIterCounts.resize(4);
defaultIterCounts[0] = 7;
defaultIterCounts[1] = 7;
defaultIterCounts[2] = 7;
defaultIterCounts[3] = 10;
defaultMinGradMagnitudes.resize(4);
defaultMinGradMagnitudes[0] = 12;
defaultMinGradMagnitudes[1] = 5;
defaultMinGradMagnitudes[2] = 3;
defaultMinGradMagnitudes[3] = 1;
iterCountsPtr = &defaultIterCounts;
minGradientMagnitudesPtr = &defaultMinGradMagnitudes;
}
preprocessDepth( depth0, depth1, validMask0, validMask1, minDepth, maxDepth );
std::vector<Mat> pyramidImage0, pyramidDepth0,
pyramidImage1, pyramidDepth1, pyramid_dI_dx1, pyramid_dI_dy1, pyramidTexturedMask1,
pyramidCameraMatrix;
buildPyramids( image0, image1, depth0, depth1, cameraMatrix, sobelSize, sobelScale, *minGradientMagnitudesPtr,
pyramidImage0, pyramidDepth0, pyramidImage1, pyramidDepth1,
pyramid_dI_dx1, pyramid_dI_dy1, pyramidTexturedMask1, pyramidCameraMatrix );
Mat resultRt = initRt.empty() ? Mat::eye(4,4,CV_64FC1) : initRt.clone();
Mat currRt, ksi;
for( int level = (int)iterCountsPtr->size() - 1; level >= 0; level-- )
{
const Mat& levelCameraMatrix = pyramidCameraMatrix[level];
const Mat& levelImage0 = pyramidImage0[level];
const Mat& levelDepth0 = pyramidDepth0[level];
Mat levelCloud0;
cvtDepth2Cloud( pyramidDepth0[level], levelCloud0, levelCameraMatrix );
const Mat& levelImage1 = pyramidImage1[level];
const Mat& levelDepth1 = pyramidDepth1[level];
const Mat& level_dI_dx1 = pyramid_dI_dx1[level];
const Mat& level_dI_dy1 = pyramid_dI_dy1[level];
CV_Assert( level_dI_dx1.type() == CV_16S );
CV_Assert( level_dI_dy1.type() == CV_16S );
const double fx = levelCameraMatrix.at<double>(0,0);
const double fy = levelCameraMatrix.at<double>(1,1);
const double determinantThreshold = 1e-6;
Mat corresps( levelImage0.size(), levelImage0.type() );
// Run transformation search on current level iteratively.
for( int iter = 0; iter < (*iterCountsPtr)[level]; iter ++ )
{
int correspsCount = computeCorresp( levelCameraMatrix, levelCameraMatrix.inv(), resultRt.inv(DECOMP_SVD),
levelDepth0, levelDepth1, pyramidTexturedMask1[level], maxDepthDiff,
corresps );
if( correspsCount == 0 )
break;
//......... part of the code is omitted here .........
Example 3: abs
cv::Mat
Auvsi_Recognize::centerBinary( cv::Mat input )
{
typedef cv::Vec<unsigned char, 1> VT_binary;
cv::Mat buffered = cv::Mat( input.rows * 2, input.cols * 2, input.type() );
cv::Mat retVal;
int centerX, centerY;
int minX, minY, maxX, maxY;
int radiusX, radiusY;
std::vector<int> xCoords;
std::vector<int> yCoords;
// Get centroid
cv::Moments imMoments = cv::moments( input, true );
centerX = (imMoments.m10 / imMoments.m00) - buffered.cols / 2;
centerY = (imMoments.m01 / imMoments.m00) - buffered.rows / 2;
// Get centered x and y coordinates
cv::MatIterator_<VT_binary> inputIter = input.begin<VT_binary>();
cv::MatIterator_<VT_binary> inputEnd = input.end<VT_binary>();
for( ; inputIter != inputEnd; ++inputIter )
{
unsigned char value = (*inputIter)[0];
if( value )
{
xCoords.push_back( inputIter.pos().x - centerX );
yCoords.push_back( inputIter.pos().y - centerY );
}
}
if( xCoords.size() <= 0 || yCoords.size() <= 0 ) // nothing in image
{
return input;
}
// Get min and max x and y coords (centered)
minX = *std::min_element( xCoords.begin(), xCoords.end() );
minY = *std::min_element( yCoords.begin(), yCoords.end() );
maxX = *std::max_element( xCoords.begin(), xCoords.end() );
maxY = *std::max_element( yCoords.begin(), yCoords.end() );
// Get new centroids
centerX = getVectorMean<int>( xCoords );
centerY = getVectorMean<int>( yCoords );
// Get radius from center in each direction
radiusX = std::max( abs(maxX - centerX), abs(centerX - minX) );
radiusY = std::max( abs(maxY - centerY), abs(centerY - minY) );
// Center image in temporary buffered array
buffered = cvScalar(0);
std::vector<int>::iterator iterX = xCoords.begin();
std::vector<int>::iterator endX = xCoords.end();
std::vector<int>::iterator iterY = yCoords.begin();
for( ; iterX != endX; ++iterX, ++iterY )
{
buffered.at<VT_binary>( *iterY, *iterX ) = VT_binary(255);
}
// Center image
buffered = buffered.colRange( centerX - radiusX, centerX + radiusX + 1 );
buffered = buffered.rowRange( centerY - radiusY, centerY + radiusY + 1 );
// Add extra padding to make square
int outH, outW;
outH = buffered.rows;
outW = buffered.cols;
if( outH < outW ) // pad height
cv::copyMakeBorder( buffered, retVal, (outW-outH)/2, (outW-outH)/2, 0, 0, cv::BORDER_CONSTANT, cvScalar(0) );
else // pad width
cv::copyMakeBorder( buffered, retVal, 0, 0, (outH-outW)/2, (outH-outW)/2, cv::BORDER_CONSTANT, cvScalar(0) );
// Make sure output is desired width
cv::resize( retVal, buffered, input.size(), 0, 0, cv::INTER_NEAREST );
return buffered;
}
Example 4: orientation
cv::Mat orientation(cv::Mat inputImage)
{
cv::Mat orientationMat = cv::Mat::zeros(inputImage.size(), CV_32FC1); // 32F so the at<float> writes below are valid (the original used CV_8UC1)
// compute gradients at each pixel
cv::Mat grad_x, grad_y;
cv::Sobel(inputImage, grad_x, CV_32FC1, 1, 0, 3, 1, 0, cv::BORDER_DEFAULT); // 32F output so the at<float> reads below are valid (the original requested CV_16SC1)
cv::Sobel(inputImage, grad_y, CV_32FC1, 0, 1, 3, 1, 0, cv::BORDER_DEFAULT);
cv::Mat Vx, Vy, theta, lowPassX, lowPassY;
cv::Mat lowPassX2, lowPassY2;
Vx = cv::Mat::zeros(inputImage.size(), inputImage.type());
Vx.copyTo(Vy);
Vx.copyTo(theta);
Vx.copyTo(lowPassX);
Vx.copyTo(lowPassY);
Vx.copyTo(lowPassX2);
Vx.copyTo(lowPassY2);
// estimate the local orientation of each block
int blockSize = 16;
for (int i = blockSize / 2; i < inputImage.rows - blockSize / 2; i += blockSize)
{
for (int j = blockSize / 2; j < inputImage.cols - blockSize / 2; j += blockSize)
{
float sum1 = 0.0;
float sum2 = 0.0;
for (int u = i - blockSize / 2; u < i + blockSize / 2; u++)
{
for (int v = j - blockSize / 2; v < j + blockSize / 2; v++)
{
sum1 += grad_x.at<float>(u, v) * grad_y.at<float>(u, v);
sum2 += (grad_x.at<float>(u, v)*grad_x.at<float>(u, v)) * (grad_y.at<float>(u, v)*grad_y.at<float>(u, v));
}
}
Vx.at<float>(i, j) = sum1;
Vy.at<float>(i, j) = sum2;
double calc = 0.0;
if (sum1 != 0 && sum2 != 0)
{
calc = 0.5 * atan(Vy.at<float>(i, j) / Vx.at<float>(i, j));
}
theta.at<float>(i, j) = calc;
// Perform low-pass filtering
float angle = 2 * calc;
lowPassX.at<float>(i, j) = cos(angle * CV_PI / 180);
lowPassY.at<float>(i, j) = sin(angle * CV_PI / 180);
float sum3 = 0.0;
float sum4 = 0.0;
int lowPassSize = 0; // note: left uninitialized in the original (undefined behavior); set this to the intended low-pass window size
for (int u = -lowPassSize / 2; u < lowPassSize / 2; u++)
{
for (int v = -lowPassSize / 2; v < lowPassSize / 2; v++)
{
sum3 += inputImage.at<float>(u, v) * lowPassX.at<float>(i - u*lowPassSize, j - v * lowPassSize);
sum4 += inputImage.at<float>(u, v) * lowPassY.at<float>(i - u*lowPassSize, j - v * lowPassSize);
}
}
lowPassX2.at<float>(i, j) = sum3;
lowPassY2.at<float>(i, j) = sum4;
float calc2 = 0.0;
if (sum3 != 0 && sum4 != 0)
{
calc2 = 0.5 * atan(lowPassY2.at<float>(i, j) / lowPassX2.at<float>(i, j)) * 180 / CV_PI;
}
orientationMat.at<float>(i, j) = calc2;
}
}
return orientationMat;
}
Example 5: gradient
/*Function to calculate the integral histogram*/
std::vector<cv::Mat> calculateIntegralHOG(const cv::Mat& _in, const int _nbins) {
/*Convert the input image to grayscale*/
// cv::Mat img_gray;
// cv::cvtColor(_in, img_gray, CV_BGR2GRAY);
// cv::equalizeHist(img_gray, img_gray);
/* Calculate the derivates of the grayscale image in the x and y
directions using a sobel operator and obtain 2 gradient images
for the x and y directions*/
cv::Mat xsobel, ysobel;
cv::Sobel(_in, xsobel, CV_32FC1, 1, 0);
cv::Sobel(_in, ysobel, CV_32FC1, 0, 1);
//Gradient magnitude
cv::Mat gradient_magnitude;
cv::magnitude(xsobel, ysobel, gradient_magnitude);
//Gradient orientation
cv::Mat gradient_orientation;
bool angleInDegrees = true;
cv::phase(xsobel, ysobel, gradient_orientation, angleInDegrees);
// std::cout << "_in=" << _in.rows << "x" << _in.cols << std::endl;
// std::cout << "gradient_magnitude=" << gradient_magnitude.rows << "x" << gradient_magnitude.cols << std::endl;
// std::cout << "gradient_orientation=" << gradient_orientation.rows << "x" << gradient_orientation.cols << std::endl;
//
// double min_angle, max_angle;
// cv::minMaxLoc(gradient_orientation, &min_angle, &max_angle, NULL, NULL);
// std::cout << "min_angle=" << min_angle << " ; max_angle=" << max_angle << std::endl;
//TODO: useless ?
// img_gray.release();
/* Create an array of 9 images (9 because I assume bin size 20 degrees
and unsigned gradient ( 180/20 = 9), one for each bin which will have
zeroes for all pixels, except for the pixels in the original image
for which the gradient values correspond to the particular bin.
These will be referred to as bin images. These bin images will be then
used to calculate the integral histogram, which will quicken
the calculation of HOG descriptors */
std::vector<cv::Mat> bins(_nbins);
for (int i = 0; i < _nbins; i++) {
bins[i] = cv::Mat::zeros(_in.size(), CV_32FC1);
}
/* Create an array of 9 images ( note the dimensions of the image,
the cvIntegral() function requires the size to be that), to store
the integral images calculated from the above bin images.
These 9 integral images together constitute the integral histogram */
std::vector<cv::Mat> integrals(_nbins);
//IplImage** integrals = (IplImage**) malloc(9 * sizeof(IplImage*));
for (int i = 0; i < _nbins; i++) {
integrals[i] = cv::Mat(
cv::Size(_in.size().width + 1, _in.size().height + 1), CV_64FC1);
}
/* Calculate the bin images. The magnitude and orientation of the gradient
at each pixel is calculated using the xsobel and ysobel images.
{Magnitude = sqrt(sq(xsobel) + sq(ysobel)), orientation = atan(ysobel/xsobel)}.
Then according to the orientation of the gradient, the value of the
corresponding pixel in the corresponding image is set */
int x, y;
float temp_gradient, temp_magnitude;
for (y = 0; y < _in.size().height; y++) {
/* ptr1 and ptr2 point to beginning of the current row in the xsobel and ysobel images
respectively.
ptrs[i] point to the beginning of the current rows in the bin images */
#if 0
float* xsobelRowPtr = (float*) (xsobel.row(y).data);
float* ysobelRowPtr = (float*) (ysobel.row(y).data);
float** binsRowPtrs = new float *[_nbins];
for (int i = 0; i < _nbins; i++) {
binsRowPtrs[i] = (float*) (bins[i].row(y).data);
}
#else
float* xsobelRowPtr = xsobel.ptr<float>(y);
float* ysobelRowPtr = ysobel.ptr<float>(y);
float** binsRowPtrs = new float *[_nbins];
for (int i = 0; i < _nbins; i++) {
binsRowPtrs[i] = bins[i].ptr<float>(y);
}
float* magnitudeRowPtr = gradient_magnitude.ptr<float>(y);
float* orientationRowPtr = gradient_orientation.ptr<float>(y);
#endif
/*For every pixel in a row gradient orientation and magnitude
are calculated and corresponding values set for the bin images. */
for (x = 0; x < _in.size().width; x++) {
#if 0
/* if the xsobel derivative is zero for a pixel, a small value is
//......... part of the code is omitted here .........
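The comments in the example above describe building one integral image per orientation bin so that HOG cell histograms can later be read with only a few lookups. As a hypothetical illustration of that idea (the helper name and signature below are assumptions, not part of the project above), the gradient-magnitude sum falling into each bin over a rectangular cell (x, y, w, h) can be read from the (rows+1) x (cols+1) CV_64FC1 integral images with four accesses per bin:

#include <opencv2/opencv.hpp>
#include <vector>

// Sum each bin's integral image over the rectangle [x, x+w) x [y, y+h):
// sum = I(y, x) + I(y+h, x+w) - I(y, x+w) - I(y+h, x)
std::vector<float> cellHistogram(const std::vector<cv::Mat>& integrals,
                                 int x, int y, int w, int h)
{
    std::vector<float> hist(integrals.size(), 0.f);
    for (std::size_t b = 0; b < integrals.size(); ++b)
    {
        const cv::Mat& I = integrals[b]; // CV_64FC1, one row/column larger than the input image
        double sum = I.at<double>(y, x) + I.at<double>(y + h, x + w)
                   - I.at<double>(y, x + w) - I.at<double>(y + h, x);
        hist[b] = static_cast<float>(sum);
    }
    return hist;
}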
Example 6: optimized_elec
void optimized_elec(const PointCloud<pcl::PointXYZRGB> &cloud, const cv::Mat& src_labels, float tolerance,
std::vector<std::vector<PointIndices> > &labeled_clusters,
unsigned int min_pts_per_cluster, unsigned int max_pts_per_cluster, unsigned int num_parts,
bool brute_force_border, float radius_scale)
{
typedef unsigned char uchar;
Size sz = src_labels.size();
cv::Mat dst_labels(sz, CV_32S, Scalar(-1));
cv::Mat wavefront(sz, CV_32SC2);
cv::Mat region_sizes = Mat::zeros(sz, CV_32S);
Point2i *wf = wavefront.ptr<Point2i>();
int *rsizes = region_sizes.ptr<int>();
int cc = -1;
double squared_radius = tolerance * tolerance;
for(int j = 0; j < sz.height; ++j)
{
for(int i = 0; i < sz.width; ++i)
{
if (src_labels.at<uchar>(j, i) >= num_parts || dst_labels.at<int>(j, i) != -1) // skip invalid labels and pixels that have already been labeled
continue;
Point2i* ws = wf; // initialize wavefront
Point2i p(i, j); // current pixel
cc++; // next label
dst_labels.at<int>(j, i) = cc;
int count = 0; // current region size
// wavefront propagation
while( ws >= wf ) // wavefront not empty
{
// put neighbors onto wavefront
const uchar* sl = &src_labels.at<uchar>(p.y, p.x);
int* dl = &dst_labels.at< int>(p.y, p.x);
const pcl::PointXYZRGB *sp = &cloud.at(p.x, p.y);
//right
if( p.x < sz.width-1 && dl[+1] == -1 && sl[+1] == sl[0])
if (sqnorm(sp[0], sp[+1]) <= squared_radius)
{
dl[+1] = cc;
*ws++ = Point2i(p.x+1, p.y);
}
//left
if( p.x > 0 && dl[-1] == -1 && sl[-1] == sl[0])
if (sqnorm(sp[0], sp[-1]) <= squared_radius)
{
dl[-1] = cc;
*ws++ = Point2i(p.x-1, p.y);
}
//bottom
if( p.y < sz.height-1 && dl[+sz.width] == -1 && sl[+sz.width] == sl[0])
if (sqnorm(sp[0], sp[+sz.width]) <= squared_radius)
{
dl[+sz.width] = cc;
*ws++ = Point2i(p.x, p.y+1);
}
//top
if( p.y > 0 && dl[-sz.width] == -1 && sl[-sz.width] == sl[0])
if (sqnorm(sp[0], sp[-sz.width]) <= squared_radius)
{
dl[-sz.width] = cc;
*ws++ = Point2i(p.x, p.y-1);
}
// pop most recent and propagate
p = *--ws;
count++;
}
rsizes[cc] = count;
} /* for(int i = 0; i < sz.width; ++i) */
} /* for(int j = 0; j < sz.height; ++j) */
Mat djset(sz, CV_32S);
int* dj = djset.ptr<int>();
yota(dj, dj + sz.area(), 0);
if (brute_force_border)
{
pcl::ScopeTime time("border");
SearchD search;
search.setInputCloud(cloud.makeShared());
#define HT_USE_OMP
#ifdef HT_USE_OMP
//......... part of the code is omitted here .........
Example 7:
/**
* read data
*/
bool KGDAL2CV::readData(cv::Mat img){
// make sure the image is the proper size
if (img.size().height != m_height){
return false;
}
if (img.size().width != m_width){
return false;
}
// make sure the raster is alive
if (m_dataset == NULL || m_driver == NULL){
return false;
}
// set the image to zero
img = 0;
// iterate over each raster band
// note that OpenCV does bgr rather than rgb
int nChannels = m_dataset->GetRasterCount();
GDALColorTable* gdalColorTable = NULL;
if (m_dataset->GetRasterBand(1)->GetColorTable() != NULL){
gdalColorTable = m_dataset->GetRasterBand(1)->GetColorTable();
}
const GDALDataType gdalType = m_dataset->GetRasterBand(1)->GetRasterDataType();
int nRows, nCols;
//if (nChannels > img.channels()){
// nChannels = img.channels();
//}
for (int c = 0; c < img.channels(); c++){
int realBandIndex = c;
// get the GDAL Band
GDALRasterBand* band = m_dataset->GetRasterBand(c + 1);
//if (hasColorTable == false){
if (GCI_RedBand == band->GetColorInterpretation()) realBandIndex = 2;
if (GCI_GreenBand == band->GetColorInterpretation()) realBandIndex = 1;
if (GCI_BlueBand == band->GetColorInterpretation()) realBandIndex = 0;
//}
if (hasColorTable && gdalColorTable->GetPaletteInterpretation() == GPI_RGB) c = img.channels() - 1;
// make sure the image band has the same dimensions as the image
if (band->GetXSize() != m_width || band->GetYSize() != m_height){ return false; }
// grab the raster size
nRows = band->GetYSize();
nCols = band->GetXSize();
// create a temporary scanline pointer to store data
double* scanline = new double[nCols];
// iterate over each row and column
for (int y = 0; y<nRows; y++){
// get the entire row
band->RasterIO(GF_Read, 0, y, nCols, 1, scanline, nCols, 1, GDT_Float64, 0, 0);
// set inside the image
for (int x = 0; x<nCols; x++){
// set depending on image types
// given boost, I would use enable_if to speed up. Avoid for now.
if (hasColorTable == false){
write_pixel(scanline[x], gdalType, nChannels, img, y, x, realBandIndex);
}
else{
write_ctable_pixel(scanline[x], gdalType, gdalColorTable, img, y, x, c);
}
}
}
// delete our temp pointer
delete[] scanline;
}
return true;
}
Example 8: foreground
void T2FMRF_UM::process(const cv::Mat &img_input, cv::Mat &img_output, cv::Mat &img_bgmodel)
{
if(img_input.empty())
return;
loadConfig();
if(firstTime)
saveConfig();
frame = new IplImage(img_input);
if(firstTime)
frame_data.ReleaseMemory(false);
frame_data = frame;
if(firstTime)
{
int width = img_input.size().width;
int height = img_input.size().height;
lowThresholdMask = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
lowThresholdMask.Ptr()->origin = IPL_ORIGIN_BL;
highThresholdMask = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
highThresholdMask.Ptr()->origin = IPL_ORIGIN_BL;
params.SetFrameSize(width, height);
params.LowThreshold() = threshold;
params.HighThreshold() = 2*params.LowThreshold();
params.Alpha() = alpha;
params.MaxModes() = gaussians;
params.Type() = TYPE_T2FMRF_UM;
params.KM() = km; // Factor control for the T2FMRF-UM [0,3] default: 2
params.KV() = kv; // Factor control for the T2FMRF-UV [0.3,1] default: 0.9
bgs.Initalize(params);
bgs.InitModel(frame_data);
old_labeling = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
old = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
mrf.height = height;
mrf.width = width;
mrf.Build_Classes_OldLabeling_InImage_LocalEnergy();
firstTime = false;
}
bgs.Subtract(frameNumber, frame_data, lowThresholdMask, highThresholdMask);
cvCopy(lowThresholdMask.Ptr(), old);
/************************************************************************/
/* the code for MRF, it can be noted when using other methods */
/************************************************************************/
//the optimization process is done when the foreground detection is stable,
if(frameNumber >= 10)
{
gmm = bgs.gmm();
hmm = bgs.hmm();
mrf.background2 = frame_data.Ptr();
mrf.in_image = lowThresholdMask.Ptr();
mrf.out_image = lowThresholdMask.Ptr();
mrf.InitEvidence2(gmm,hmm,old_labeling);
mrf.ICM2();
cvCopy(mrf.out_image, lowThresholdMask.Ptr());
}
cvCopy(old, old_labeling);
lowThresholdMask.Clear();
bgs.Update(frameNumber, frame_data, lowThresholdMask);
cv::Mat foreground(highThresholdMask.Ptr());
if(showOutput)
cv::imshow("T2FMRF-UM", foreground);
foreground.copyTo(img_output);
delete frame;
frameNumber++;
}
Example 9: vor
//build the VOR once (weighted Voronoi diagram?)
void CVT::vor(cv::Mat & img)
{
cv::Mat dist(img.size(), CV_32F, cv::Scalar::all(FLT_MAX)); //an image with infinity distance
cv::Mat root(img.size(), CV_16U, cv::Scalar::all(USHRT_MAX)); //an image of root index
cv::Mat visited(img.size(), CV_8U, cv::Scalar::all(0)); //all unvisited
//init
std::vector< std::pair<float, cv::Point> > open;
ushort site_id = 0;
for (auto& c : this->cells)
{
if (debug)
{
if (c.site.x<0 || c.site.x>img.size().height)
std::cout << "! Warning: c.site.x=" << c.site.x << std::endl;
if (c.site.y<0 || c.site.y>img.size().width)
std::cout << "! Warning: c.site.y=" << c.site.y << std::endl;
}
cv::Point pix((int)c.site.x, (int)c.site.y);
float d = color2dist(img, pix);
//store the color value from the 2D image in dist; a larger dist means the pixel is closer to white
dist.at<float>(pix.x, pix.y) = d;
root.at<ushort>(pix.x, pix.y) = site_id++;
open.push_back( std::make_pair(d, pix) );//push
c.coverage.clear();
}
std::make_heap(open.begin(), open.end(), compareCell);
//propagate
while (open.empty() == false)
{
//
std::pop_heap(open.begin(), open.end(), compareCell); //pop the stored (color value, point) pair
auto cell = open.back();
auto& cpos = cell.second;
open.pop_back();
//check if the distance from this cell is already updated
if (cell.first > dist.at<float>(cpos.x, cpos.y)) continue;
if (visited.at<uchar>(cpos.x, cpos.y) > 0) continue; //visited
visited.at<uchar>(cpos.x, cpos.y) = 1;
//check the neighbors
for (int dx =-1; dx <= 1; dx++) //x is row
{
int x = cpos.x + dx;
if (x < 0 || x >= img.size().height) continue;
for (int dy = -1; dy <= 1; dy++) //y is column
{
if (dx == 0 && dy == 0) continue; //itself...
int y = cpos.y + dy;
if (y < 0 || y >= img.size().width) continue;
float newd = dist.at<float>(cpos.x, cpos.y) + color2dist(img, cv::Point(x, y));
float oldd = dist.at<float>(x, y);
if (newd < oldd)
{
dist.at<float>(x, y)=newd;
root.at<ushort>(x, y) = root.at<ushort>(cpos.x, cpos.y);
open.push_back(std::make_pair(newd, cv::Point(x, y)));
std::push_heap(open.begin(), open.end(), compareCell);
}
}//end for dy
}//end for dx
}//end while
//collect cells
for (int x = 0; x < img.size().height; x++)
{
for (int y = 0; y < img.size().width; y++)
{
ushort rootid = root.at<ushort>(x, y);
this->cells[rootid].coverage.push_back(cv::Point(x,y));
}//end y
}//end x
//remove empty cells... needed for the later CVT step
int cvt_size = this->cells.size();
for (int i = 0; i < cvt_size; i++)
{
if (this->cells[i].coverage.empty())
{
this->cells[i] = this->cells.back();
this->cells.pop_back();
i--;
cvt_size--;
}
}//end for i
if (debug)
{
//this shows the progress...
double min;
//......... part of the code is omitted here .........
Example 10: main
/*!
*
*/
int main(int argc,char **argv)
{
readArguments(argc,argv);
aruco::BoardConfiguration boardConf;
boardConf.readFromFile(boC->sval[0]);
cv::VideoCapture vcapture;
//read from camera or from file
if (strcmp(inp->sval[0], "live")==0)
{
vcapture.open(0);
vcapture.set(CV_CAP_PROP_FRAME_WIDTH, wid->ival[0]);
vcapture.set(CV_CAP_PROP_FRAME_HEIGHT, hei->ival[0]);
int val = CV_FOURCC('M', 'P', 'E', 'G');
vcapture.set(CV_CAP_PROP_FOURCC, val);
}
else
vcapture.open(inp->sval[0]);
//check video is open
if (!vcapture.isOpened())
{
std::cerr<<"Could not open video"<<std::endl;
return -1;
}
//read first image to get the dimensions
vcapture>>inpImg;
//Open outputvideo
cv::VideoWriter vWriter;
if (out->count)
vWriter.open(out->sval[0], CV_FOURCC('M','J','P','G'), fps->ival[0], inpImg.size());
//read camera parameters if passed
if (intr->count)
{
params.readFromXMLFile(intr->sval[0]);
params.resize(inpImg.size());
}
//Create gui
cv::namedWindow("thres", CV_GUI_NORMAL | CV_WINDOW_AUTOSIZE);
cv::namedWindow("in", CV_GUI_NORMAL | CV_WINDOW_AUTOSIZE);
cvMoveWindow("thres", 0, 0);
cvMoveWindow("in", inpImg.cols + 5, 0);
boardDetector.setParams(boardConf,params,msiz->dval[0]);
boardDetector.getMarkerDetector().getThresholdParams( dThresParam1,dThresParam2);
// boardDetector.getMarkerDetector().enableErosion(true);//for chessboards
iThresParam1=dThresParam1;
iThresParam2=dThresParam2;
cv::createTrackbar("ThresParam1", "in",&iThresParam1, 13, cvTrackBarEvents);
cv::createTrackbar("ThresParam2", "in",&iThresParam2, 13, cvTrackBarEvents);
int index=0;
///////////////
// Main Loop //
///////////////
while(appRunnin)
{
inpImg.copyTo(inpImgCpy);
index++; //number of images captured
double tick = static_cast<double>(cv::getTickCount());//for checking the speed
float probDetect=boardDetector.detect(inpImg); //Detection of the board
tick=(static_cast<double>(cv::getTickCount())-tick)/cv::getTickFrequency();
std::cout<<"Time detection="<<1000*tick<<" milliseconds"<<std::endl;
//print marker borders
for (unsigned int i=0; i<boardDetector.getDetectedMarkers().size(); i++)
boardDetector.getDetectedMarkers()[i].draw(inpImgCpy,cv::Scalar(0,0,255),1);
//print board
if (params.isValid())
{
if ( probDetect>thre->dval[0])
{
aruco::CvDrawingUtils::draw3dAxis( inpImgCpy,boardDetector.getDetectedBoard(),params);
}
}
//DONE! Easy, right?
//show input with augmented information and the thresholded image
cv::imshow("in",inpImgCpy);
cv::imshow("thres",boardDetector.getMarkerDetector().getThresholdedImage());
// write to video if desired
if (out->count)
{
//create a beautiful composed image showing the thresholded
//first create a small version of the thresholded image
cv::Mat smallThres;
cv::resize(boardDetector.getMarkerDetector().getThresholdedImage(),smallThres,
cv::Size(inpImgCpy.cols/3,inpImgCpy.rows/3));
cv::Mat small3C;
cv::cvtColor(smallThres,small3C,CV_GRAY2BGR);
cv::Mat roi=inpImgCpy(cv::Rect(0,0,inpImgCpy.cols/3,inpImgCpy.rows/3));
//......... part of the code is omitted here .........
Example 11: Ransac_for_buildings
bool Segmentation::Ransac_for_buildings(float dem_spacing, double ransac_threshold, cv::Mat original_tiles_merged)
{
StepResource pStep("Computing RANSAC on all the identified buildings", "app", "a2beb9b8-218e-11e4-969b-b2227cce2b54");
ProgressResource pResource("ProgressBar");
Progress *pProgress = pResource.get();
pProgress->setSettingAutoClose(true);
pProgress->updateProgress("Computing RANSAC on all buildings", 0, NORMAL);
Ransac_buildings = Ransac(Segmentation::path);
cv::Mat roof_image = cv::Mat::zeros(original_tiles_merged.size(), CV_8UC3);
buildingS.resize(blobs.size());
buildingS_inliers.resize(blobs.size());
buildingS_outliers.resize(blobs.size());
buildingS_plane_coefficients.resize(blobs.size());
buldingS_number_inliers.resize(blobs.size());
std::ofstream building_file;
std::ofstream cont_file;
cont_file.open (std::string(path) + "/Results/Number_of_RANSAC_applications.txt");
for(int i = 0; i < blobs.size(); i++)
{// i index is the building (blob) index
pProgress->updateProgress("Computing RANSAC on all buildings\nBuilding "+ StringUtilities::toDisplayString(i) + " on "+ StringUtilities::toDisplayString(blobs.size()), static_cast<double>(static_cast<double>(i)/blobs.size()*100), NORMAL);
building_file.open (std::string(path) + "/Results/Building_" + StringUtilities::toDisplayString(i)+".txt");
building_file << 'i' << '\t' << 'j' << '\t' << 'X' << '\t' << 'Y' << '\t' << 'Z' << '\n';
buildingS[i].setConstant(blobs[i].size(), 3, 0.0);
// the j loop retrieves the X, Y, Z coordinate for each pixel of all the buildings
for(int j = 0; j < blobs[i].size(); j++)
{// j index is the pixel index for the single building
// loop on all the pixel of the SINGLE building
int pixel_column = blobs[i][j].x;
int pixel_row = blobs[i][j].y;
double x_building = pixel_column * dem_spacing;// xMin + pixel_column * dem_spacing // object coordinate
double y_building = pixel_row * dem_spacing;// yMin + pixel_row * dem_spacing // object coordinate
double z_building = original_tiles_merged.at<float>(pixel_row, pixel_column);//object coordinate
buildingS[i](j,0) = x_building;
buildingS[i](j,1) = y_building;
buildingS[i](j,2) = z_building;
building_file << pixel_row+1 << '\t' << pixel_column+1 << '\t' << buildingS[i](j,0) << '\t' << buildingS[i](j,1) << '\t' << buildingS[i](j,2) << '\n'; //+1 on the image coordinates to verify against Opticks' rasters (origin is 1,1)
}
building_file.close();
std::ofstream inliers_file;
std::ofstream parameters_file;
inliers_file.open (std::string(path) + "/Results/Inliers_building_" + StringUtilities::toDisplayString(i)+".txt");
parameters_file.open (std::string(path) + "/Results/plane_parameters_building_" + StringUtilities::toDisplayString(i)+".txt");
//parameters_file << "a\tb\tc\td\tmean_dist\tstd_dist\n";
int cont = 0;
Ransac_buildings.ransac_msg += "\n____________Building number " + StringUtilities::toDisplayString(i) +"____________\n";
Ransac_buildings.ransac_msg += "\nITERATION NUMBER " + StringUtilities::toDisplayString(cont) +"\n";
Ransac_buildings.ComputeModel(buildingS[i], ransac_threshold);
buldingS_number_inliers[i]= Ransac_buildings.n_best_inliers_count;
buildingS_inliers[i] = Ransac_buildings.final_inliers;
buildingS_outliers[i] = Ransac_buildings.final_outliers;
buildingS_plane_coefficients[i] = Ransac_buildings.final_model_coefficients;
double inliers_percentage = static_cast<double>( (Ransac_buildings.n_best_inliers_count) ) / static_cast<double> (buildingS[i].rows());
int inliers_so_far = Ransac_buildings.n_best_inliers_count;
std::vector<int> old_final_outliers = Ransac_buildings.final_outliers;
// DRAWS THE ROOFS yellow
for (int k = 0; k < Ransac_buildings.n_best_inliers_count; k++)
{
int pixel_row = static_cast<int>(buildingS[i](Ransac_buildings.final_inliers[k], 1) / dem_spacing);
int pixel_column = static_cast<int>(buildingS[i](Ransac_buildings.final_inliers[k], 0) / dem_spacing);
unsigned char r = 255;// unsigned char(255 * (rand()/(1.0 + RAND_MAX)));
unsigned char g = 255;// unsigned char(255 * (rand()/(1.0 + RAND_MAX)));
unsigned char b = 0;//unsigned char(255 * (rand()/(1.0 + RAND_MAX)));
roof_image.at<cv::Vec3b>(pixel_row, pixel_column)[0] = b;
roof_image.at<cv::Vec3b>(pixel_row, pixel_column)[1] = g;
roof_image.at<cv::Vec3b>(pixel_row, pixel_column)[2] = r;
}
while (inliers_percentage < 0.90)
{
cont ++;
Ransac_buildings.ransac_msg += "\nITERATION NUMBER " + StringUtilities::toDisplayString(cont) +"\n";
Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> building_outliers;
building_outliers.setConstant(buildingS[i].rows() - inliers_so_far, 3, 0.0);
//* the method is probably fine as it is, because the outlier matrix is filled in an ordered way,
//* but the inlier/outlier indices no longer refer to the original building matrix, only to the input matrix,
//* so the index IDs must be mapped back to their original positions
for (int w = 0; w <building_outliers.rows(); w++)
{
building_outliers(w, 0) = buildingS[i](old_final_outliers[w], 0);
building_outliers(w, 1) = buildingS[i](old_final_outliers[w], 1);
building_outliers(w, 2) = buildingS[i](old_final_outliers[w], 2);
//Ransac_buildings.ransac_msg += "\n" + StringUtilities::toDisplayString(pixel_row+1) + "\t" + StringUtilities::toDisplayString(pixel_column+1) + "\t" + StringUtilities::toDisplayString(final_outliers[w]) + "\t" + StringUtilities::toDisplayString(building_outliers(w, 0))+ "\t"+ StringUtilities::toDisplayString(building_outliers(w, 1)) + "\t" + StringUtilities::toDisplayString(building_outliers(w, 2))+"\n"; // needed for testing (test passed at first iteration)
}
//......... part of the code is omitted here .........
Example 12: get_bboxes_
cv::Mat get_bboxes_(const cv::Mat & image, const cv::Mat & seg, int x1, int y1, int x2, int y2) {
double max_id_;
cv::minMaxIdx(seg, nullptr, &max_id_);
int max_id = max_id_;
std::vector<std::shared_ptr<Segment>> segments;
segments.reserve(max_id);
int nchannels = image.channels();
cv::Size size = image.size();
for (int i = 0; i <= max_id; i++) {
segments.push_back(std::make_shared<UniformSegment>(i, size));
}
{
AdjacencyMatrix adjacency(max_id + 1);
for (int i = 0; i < image.rows; i++) {
for (int j = 0; j < image.cols; j++) {
cv::Point p(j, i);
uint16_t id = seg.at<uint16_t>(p);
segments[id]->addPoint(image, p);
if (i < image.rows - 1) {
uint16_t n = seg.at<uint16_t>(i+1, j);
if (n != id && adjacency.get(id, n) == false) {
adjacency.get(id, n) = true;
segments[id]->addNeighbour(n);
segments[n]->addNeighbour(id);
}
}
if (j < image.cols - 1) {
uint16_t n = seg.at<uint16_t>(i, j+1);
if (n != id && adjacency.get(id, n) == false) {
adjacency.get(id, n) = true;
segments[id]->addNeighbour(n);
segments[n]->addNeighbour(id);
}
}
}
}
}
cv::Mat bboxes_out;
float similarity_sum = 0.f;
for (auto & s: segments) {
s->finalizeSetup();
}
//for (uint32_t i = 0; i < bboxes.rows; i++) {
//cv::Rect truth(cv::Point(bboxes.at<int>(i, 0), bboxes.at<int>(i, 1)), cv::Point(bboxes.at<int>(i, 2), bboxes.at<int>(i, 3)));
cv::Rect truth(cv::Point(x1, y1), cv::Point(x2, y2));
cv::Mat mask = cv::Mat::zeros(image.size(), CV_8UC1);
cv::rectangle(mask, truth, cv::Scalar(1), CV_FILLED);
std::unordered_set<uint32_t> contained_segments;
std::unordered_set<uint32_t> crossing_segments;
for (const auto & s: segments) {
cv::Mat masked;
mask.copyTo(masked, s->mask);
int count = cv::countNonZero(masked);
if (count == s->size)
contained_segments.insert(s->id);
else if (count)
crossing_segments.insert(s->id);
}
std::shared_ptr<Segment> s = std::make_shared<UniformSegment>(-1, image.size());
if (contained_segments.size()) {
for (const int & id: contained_segments) {
s = s->merge(segments[id].get());
}
} else {
s = s->merge(segments[seg.at<uint16_t>(truth.y + truth.height / 2, truth.x + truth.width)].get());
}
std::cout << "size: " << crossing_segments.size() << std::endl;
#ifdef DEBUG
cv::Mat draw;
s->mask.copyTo(draw);
draw *= 255;
/*for (const int & n: s->neighbours) {
draw += segments[n]->mask * 127;
}*/
//cv::rectangle(draw, cv::Point(bboxes.at<int>(0, 0), bboxes.at<int>(0, 1)), cv::Point(bboxes.at<int>(0, 2), bboxes.at<int>(0, 3)), cv::Scalar(255));
cv::namedWindow("Mask", cv::WINDOW_NORMAL);
cv::imshow("Mask", draw);
cv::waitKey();
#endif
float max_sim = jaccardSimilarity(truth, cv::Rect(s->min_p, s->max_p));
#ifdef DEBUG
std::cout << "max_sim: " << max_sim << std::endl;
#endif
bool quit = false;
while (quit == false) {
quit = true;
for (const int & n: s->neighbours) {
std::shared_ptr<Segment> s2 = s->merge(segments[n].get());
float sim = jaccardSimilarity(truth, cv::Rect(s2->min_p, s2->max_p));
//......... part of the code is omitted here .........
Example 13: calculate
void MapperGradAffine::calculate(
const cv::Mat& img1, const cv::Mat& image2, cv::Ptr<Map>& res) const
{
Mat gradx, grady, imgDiff;
Mat img2;
CV_DbgAssert(img1.size() == image2.size());
CV_DbgAssert(img1.channels() == image2.channels());
CV_DbgAssert(img1.channels() == 1 || img1.channels() == 3);
if(!res.empty()) {
// We have initial values for the registration: we move img2 to that initial reference
res->inverseWarp(image2, img2);
} else {
img2 = image2;
}
// Get gradient in all channels
gradient(img1, img2, gradx, grady, imgDiff);
// Matrices with reference frame coordinates
Mat grid_r, grid_c;
grid(img1, grid_r, grid_c);
// Calculate parameters using least squares
Matx<double, 6, 6> A;
Vec<double, 6> b;
// For each value in A, all the matrix elements are added and then the channels are also added,
// so we have two calls to "sum". The result can be found in the first element of the final
// Scalar object.
Mat xIx = grid_c.mul(gradx);
Mat xIy = grid_c.mul(grady);
Mat yIx = grid_r.mul(gradx);
Mat yIy = grid_r.mul(grady);
Mat Ix2 = gradx.mul(gradx);
Mat Iy2 = grady.mul(grady);
Mat xy = grid_c.mul(grid_r);
Mat IxIy = gradx.mul(grady);
A(0, 0) = sum(sum(sqr(xIx)))[0];
A(0, 1) = sum(sum(xy.mul(Ix2)))[0];
A(0, 2) = sum(sum(grid_c.mul(Ix2)))[0];
A(0, 3) = sum(sum(sqr(grid_c).mul(IxIy)))[0];
A(0, 4) = sum(sum(xy.mul(IxIy)))[0];
A(0, 5) = sum(sum(grid_c.mul(IxIy)))[0];
A(1, 1) = sum(sum(sqr(yIx)))[0];
A(1, 2) = sum(sum(grid_r.mul(Ix2)))[0];
A(1, 3) = A(0, 4);
A(1, 4) = sum(sum(sqr(grid_r).mul(IxIy)))[0];
A(1, 5) = sum(sum(grid_r.mul(IxIy)))[0];
A(2, 2) = sum(sum(Ix2))[0];
A(2, 3) = A(0, 5);
A(2, 4) = A(1, 5);
A(2, 5) = sum(sum(IxIy))[0];
A(3, 3) = sum(sum(sqr(xIy)))[0];
A(3, 4) = sum(sum(xy.mul(Iy2)))[0];
A(3, 5) = sum(sum(grid_c.mul(Iy2)))[0];
A(4, 4) = sum(sum(sqr(yIy)))[0];
A(4, 5) = sum(sum(grid_r.mul(Iy2)))[0];
A(5, 5) = sum(sum(Iy2))[0];
// Lower half values (A is symmetric)
A(1, 0) = A(0, 1);
A(2, 0) = A(0, 2);
A(2, 1) = A(1, 2);
A(3, 0) = A(0, 3);
A(3, 1) = A(1, 3);
A(3, 2) = A(2, 3);
A(4, 0) = A(0, 4);
A(4, 1) = A(1, 4);
A(4, 2) = A(2, 4);
A(4, 3) = A(3, 4);
A(5, 0) = A(0, 5);
A(5, 1) = A(1, 5);
A(5, 2) = A(2, 5);
A(5, 3) = A(3, 5);
A(5, 4) = A(4, 5);
// Calculation of b
b(0) = -sum(sum(imgDiff.mul(xIx)))[0];
b(1) = -sum(sum(imgDiff.mul(yIx)))[0];
b(2) = -sum(sum(imgDiff.mul(gradx)))[0];
b(3) = -sum(sum(imgDiff.mul(xIy)))[0];
b(4) = -sum(sum(imgDiff.mul(yIy)))[0];
b(5) = -sum(sum(imgDiff.mul(grady)))[0];
// Calculate affine transformation. We use Cholesky decomposition, as A is symmetric.
Vec<double, 6> k = A.inv(DECOMP_CHOLESKY)*b;
Matx<double, 2, 2> linTr(k(0) + 1., k(1), k(3), k(4) + 1.);
Vec<double, 2> shift(k(2), k(5));
if(res.empty()) {
res = Ptr<Map>(new MapAffine(linTr, shift));
} else {
MapAffine newTr(linTr, shift);
res->compose(newTr);
}
}
Example 14: Binarize
void cds::Binarize(cv::Mat const &anImage, cv::Mat &binaryResult)
{
binaryResult.create(anImage.size(), anImage.type());
cv::threshold(anImage, binaryResult, 128, 255, CV_THRESH_BINARY+CV_THRESH_OTSU);
}
Example 15: segment
cv::Mat ArmObjSegmentation::segment(cv::Mat& color_img, cv::Mat& depth_img, cv::Mat& self_mask,
cv::Mat& table_mask, bool init_color_models)
{
cv::Mat color_img_lab_uchar(color_img.size(), color_img.type());
cv::Mat color_img_lab(color_img.size(), CV_32FC3);
cv::cvtColor(color_img, color_img_lab_uchar, CV_BGR2HSV);
// cv::cvtColor(color_img, color_img_lab_uchar, CV_BGR2Lab);
color_img_lab_uchar.convertTo(color_img_lab, CV_32FC3, 1.0/255);
cv::Mat tmp_bw(color_img.size(), CV_8UC1);
cv::Mat bw_img(color_img.size(), CV_32FC1);
// Convert to grayscale
cv::cvtColor(color_img, tmp_bw, CV_BGR2GRAY);
tmp_bw.convertTo(bw_img, CV_32FC1, 1.0/255);
#ifdef VISUALIZE_GRAPH_WEIGHTS
cv::Mat fg_weights(color_img.size(), CV_32FC1, cv::Scalar(0.0));
cv::Mat fg_tied_weights(color_img.size(), CV_32FC1, cv::Scalar(0.0));
cv::Mat bg_tied_weights(color_img.size(), CV_32FC1, cv::Scalar(0.0));
cv::Mat bg_weights(color_img.size(), CV_32FC1, cv::Scalar(0.0));
cv::Mat wu_weights(color_img.size(), CV_32FC1, cv::Scalar(0.0));
cv::Mat wl_weights(color_img.size(), CV_32FC1, cv::Scalar(0.0));
#endif // VISUALIZE_GRAPH_WEIGHTS
// TODO: Clean this up once we have stuff working well
cv::Mat inv_self_mask;
cv::bitwise_not(self_mask, inv_self_mask);
cv::Mat much_larger_mask(self_mask.size(), CV_8UC1, cv::Scalar(255));
cv::Mat enlarge_element(bg_enlarge_size_, bg_enlarge_size_, CV_8UC1, cv::Scalar(255));
cv::dilate(inv_self_mask, much_larger_mask, enlarge_element);
cv::Mat larger_mask, known_arm_mask;
cv::Mat arm_band = getArmBand(inv_self_mask, arm_enlarge_width_, arm_shrink_width_, false,
larger_mask, known_arm_mask);
// Get known arm pixels
cv::Mat known_arm_pixels;
color_img_lab.copyTo(known_arm_pixels, known_arm_mask);
cv::Mat known_bg_mask = much_larger_mask - larger_mask;
// Get known object pixels
cv::Mat known_bg_pixels;
color_img_lab.copyTo(known_bg_pixels, known_bg_mask);
// Get stats for building graph
int num_nodes = 0;
int num_edges = 0;
tabletop_pushing::NodeTable nt;
for (int r = 0; r < much_larger_mask.rows; ++r)
{
for (int c = 0; c < much_larger_mask.cols; ++c)
{
if (much_larger_mask.at<uchar>(r,c) > 0)
{
// Add this as another node
int cur_idx = num_nodes++;
nt.addNode(r, c, cur_idx);
int test_idx = nt.getIdx(r,c);
// Check for edges to add
// left edge
if (c > 0 && much_larger_mask.at<uchar>(r, c-1) > 0)
{
num_edges++;
}
if (r > 0)
{
// Up edge
if(much_larger_mask.at<uchar>(r-1,c) > 0)
{
num_edges++;
}
}
}
}
}
if (num_nodes < 1)
{
cv::Mat empty(color_img.size(), CV_8UC1, cv::Scalar(0));
return empty;
}
if (!have_arm_color_model_)
{
std::cout << "[arm_obj_segmentation]: Building arm color model." << std::endl;
arm_color_model_ = getGMMColorModel(known_arm_pixels, known_arm_mask, 3);
have_arm_color_model_ = true;
}
if(!have_bg_color_model_)
{
std::cout << "[arm_obj_segmentation]: Building bg color model." << std::endl;
bg_color_model_ = getGMMColorModel(known_bg_pixels, known_bg_mask, 5);
have_bg_color_model_ = true;
}
#ifdef USE_CANNY_EDGES
cv::Mat canny_edges_8bit;
cv::Mat canny_edges;
cv::Canny(tmp_bw, canny_edges_8bit, 120, 250);
canny_edges_8bit.convertTo(canny_edges, CV_32FC1, 1.0/255);
//......... part of the code is omitted here .........