

C++ Mat::clone Method Code Examples

This article collects typical usage examples of the C++ cv::Mat::clone method. If you are unsure what Mat::clone does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples of its containing class, cv::Mat.


The following 15 code examples of the Mat::clone method are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.

Example 1: setImages

void BlockMatching::setImages(cv::Mat &imageLeft, cv::Mat &imageRight)
{
    this->imageLeft = imageLeft.clone();
    this->imageRight = imageRight.clone();
}
Developer: DLuensch, Project: StereoVision-ADCensus, Lines: 5, Source: blockmatching.cpp
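
Why the setter clones instead of assigning: cv::Mat's assignment operator copies only the header and reference-counts the shared pixel buffer, so this->imageLeft = imageLeft; would alias the caller's image and see any later writes to it, while clone() allocates a fresh buffer. A minimal, self-contained sketch of the difference (not part of the original project):

#include <opencv2/core.hpp>
#include <cassert>

int main()
{
    cv::Mat a(4, 4, CV_8UC1, cv::Scalar(0));

    cv::Mat shallow = a;         // header copy: shares the same pixel buffer
    cv::Mat deep    = a.clone(); // deep copy: owns its own pixel buffer

    a.at<uchar>(0, 0) = 255;
    assert(shallow.at<uchar>(0, 0) == 255); // the write is visible through the shared buffer
    assert(deep.at<uchar>(0, 0) == 0);      // the clone is unaffected
    return 0;
}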

Example 2: faceOff

void HumanModel::faceOff(const cv::Mat& image, const std::vector<vec2f>& points)
{
	face_texture = image;
	// the model's face has already been triangulated at load time, if it had polygons.

	// the model uses OpenGL coordinates: -Z forward, Y up.
	std::vector<vec3f> verts;
	std::vector<vec2f> face_texcoords;

#pragma warning (disable: 4305)
	// vertex positions and corresponding texture coordinates, grabbed by hand in Blender.
	// Note that Blender uses the OpenGL coordinate system; the texture origin is at the bottom left.
	verts.push_back(vec3f(0.03344, 1.65360, 0.14763));     face_texcoords.push_back(vec2f(0.631, 0.571));
	verts.push_back(vec3f(-0.03354, 1.65374, 0.14678));    face_texcoords.push_back(vec2f(0.377, 0.571));
	verts.push_back(vec3f(-0.0000, 1.65530, 0.15450));     face_texcoords.push_back(vec2f(0.504, 0.592));
	verts.push_back(vec3f(0.04550, 1.65460, 0.14315));     face_texcoords.push_back(vec2f(0.667, 0.574));
	verts.push_back(vec3f(0.01932, 1.65027, 0.14915));     face_texcoords.push_back(vec2f(0.589, 0.568));
	verts.push_back(vec3f(-0.01932, 1.65027, 0.14915));    face_texcoords.push_back(vec2f(0.411, 0.568));
	verts.push_back(vec3f(-0.04550, 1.65460, 0.14315));    face_texcoords.push_back(vec2f(0.333, 0.574));
	verts.push_back(vec3f(0.0182, 1.6172, 0.1539));        face_texcoords.push_back(vec2f(0.574, 0.442));
	verts.push_back(vec3f(-0.0182, 1.6172, 0.1539));       face_texcoords.push_back(vec2f(0.426, 0.442));
	verts.push_back(vec3f(0.00000, 1.6145, 0.1730));       face_texcoords.push_back(vec2f(0.503, 0.447));
	verts.push_back(vec3f(0.00907, 1.6037, 0.15788));      face_texcoords.push_back(vec2f(0.532, 0.405));
	verts.push_back(vec3f(-0.00907, 1.6037, 0.15788));     face_texcoords.push_back(vec2f(0.468, 0.405));
	verts.push_back(vec3f(0.0232, 1.5770, 0.15163));       face_texcoords.push_back(vec2f(0.575, 0.305));
	verts.push_back(vec3f(-0.0232, 1.5770, 0.15163));      face_texcoords.push_back(vec2f(0.425, 0.305));
	verts.push_back(vec3f(0.00000, 1.57779, 0.15502));     face_texcoords.push_back(vec2f(0.508, 0.225));
	verts.push_back(vec3f(0.00000, 1.58820, 0.16580));     face_texcoords.push_back(vec2f(0.500, 0.356));
	verts.push_back(vec3f(0.07360, 1.65420, 0.0936));      face_texcoords.push_back(vec2f(0.889, 0.594));
	verts.push_back(vec3f(-0.07360, 1.65420, 0.0936));     face_texcoords.push_back(vec2f(0.111, 0.594));
	verts.push_back(vec3f(0.0555, 1.5669, 0.1042));        face_texcoords.push_back(vec2f(0.824, 0.245));
	verts.push_back(vec3f(-0.0555, 1.5669, 0.1042));       face_texcoords.push_back(vec2f(0.176, 0.245));
	verts.push_back(vec3f(0.0403, 1.6178, 0.1487));        face_texcoords.push_back(vec2f(0.656, 0.434));
	verts.push_back(vec3f(-0.0403, 1.6178, 0.1487));       face_texcoords.push_back(vec2f(0.344, 0.434));
	verts.push_back(vec3f(0.0727, 1.6180, 0.0912));        face_texcoords.push_back(vec2f(0.916, 0.460));
	verts.push_back(vec3f(-0.0727, 1.6180, 0.0912));       face_texcoords.push_back(vec2f(0.094, 0.460));
	verts.push_back(vec3f(0.0674, 1.6900, 0.1101));        face_texcoords.push_back(vec2f(0.804, 0.721));
	verts.push_back(vec3f(-0.0674, 1.6900, 0.1101));       face_texcoords.push_back(vec2f(0.196, 0.721));
	verts.push_back(vec3f(0.0325, 1.67235, 0.15495));      face_texcoords.push_back(vec2f(0.622, 0.648));
	verts.push_back(vec3f(-0.0325, 1.67235, 0.15495));     face_texcoords.push_back(vec2f(0.378, 0.648));
	verts.push_back(vec3f(0.0000, 1.6740, 0.15900));       face_texcoords.push_back(vec2f(0.500, 0.656));
	verts.push_back(vec3f(0.02380, 1.5391, 0.1280));       face_texcoords.push_back(vec2f(0.625, 0.112));
	verts.push_back(vec3f(0.0000, 1.5340, 0.13860));       face_texcoords.push_back(vec2f(0.500, 0.089));
	verts.push_back(vec3f(-0.02380, 1.5391, 0.1280));      face_texcoords.push_back(vec2f(0.375, 0.112));
	verts.push_back(vec3f(0.0436, 1.7156, 0.1360));        face_texcoords.push_back(vec2f(0.662, 0.796));
	verts.push_back(vec3f(0.0000, 1.7248, 0.1462));        face_texcoords.push_back(vec2f(0.500, 0.819));
	verts.push_back(vec3f(-0.0436, 1.7156, 0.1360));       face_texcoords.push_back(vec2f(0.338, 0.796));
#pragma warning (default: 4305)

	assert(points.size() == POINT_MAX);
	assert(verts.size() == POINT_MAX);

	// NOTE: an image (cv::Mat) is top-down on the Y axis, while OpenGL uses a bottom-up Y axis.
	float scale_x = (verts[LEYE_C].x - verts[REYE_C].x)/(points[LEYE_C].x - points[REYE_C].x);
	float scale_y = (verts[EYEBROW_M].y - verts[NOSTRIL_R].y)/(points[EYEBROW_M].y - points[NOSTRIL_R].y);
	std::cout << "face area scaling: (" << scale_x << "," << scale_y << ")\n";
	float ratio = scale_y / scale_x;
	
	// looking for a material named "face"
	auto is_face_material = [](const material_t& material) -> bool { return material.name == "face"; };
	auto it = std::find_if(materials.begin(), materials.end(), is_face_material);
	assert(it !=  materials.end());
	size_t face_material_id = std::distance(materials.begin(), it);
	std::cout << "face material id =" << _ << face_material_id << std::endl;
	
#ifdef DEBUG
	Mat texture = cv::imread("Model_obj/face.png");
	assert(!texture.empty());

	const Size texture_size = texture.size();
#else
	const Size texture_size(1024, 1024);
#endif

	Rect rect_src(Point(0, 0), texture_size);
	const std::vector<vec3i> delaunay = getSubdivTriangle(rect_src, face_texcoords);

#ifdef DEBUG
	const Size image_size = image.size();
	Rect rect_dst(Point(0, 0), image_size);

	Mat image2 = image.clone();
	std::vector<vec2f> points_tl;
	points_tl.reserve(points.size());
	for(const vec2f& point : points)
	{
		vec2f point_tl(point.x, 1.0f - point.y);
		Point pt(cvRound(point_tl.x * image_size.width), cvRound(point_tl.y * image_size.height));
		circle(image2, pt, 3, CV_RGB(0, 255, 0), 1, LINE_8);

		points_tl.push_back(point_tl);
	}

	std::vector<vec2f> face_texcoords_tl;
	face_texcoords_tl.reserve(face_texcoords.size());
	for(const vec2f& texcoord : face_texcoords)
		face_texcoords_tl.push_back(vec2f(texcoord.u, 1.0f - texcoord.v));

	cv::imshow("texture", drawTriangle(texture, face_texcoords_tl, delaunay));
	cv::imshow("image", drawTriangle(image2, points_tl, delaunay));
//......... some code omitted here .........
Developer: KAlO2, Project: Pea, Lines: 101, Source: HumanModel.cpp
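
The helper getSubdivTriangle called above is not included in the excerpt. A hedged sketch of what it plausibly does, assuming it scales the normalized texture coordinates into rect, builds a Delaunay triangulation with cv::Subdiv2D, and returns each triangle as a triple of indices into the input list (cv::Point2f and cv::Vec3i stand in for the project's own vec2f/vec3i types):

#include <opencv2/imgproc.hpp>
#include <cmath>
#include <vector>

// Hypothetical sketch of getSubdivTriangle (not from the original project).
std::vector<cv::Vec3i> getSubdivTriangle(const cv::Rect& rect,
                                         const std::vector<cv::Point2f>& texcoords)
{
    cv::Subdiv2D subdiv(rect);
    std::vector<cv::Point2f> pts;
    for (const cv::Point2f& tc : texcoords)                 // scale normalized coords into rect
        pts.emplace_back(tc.x * rect.width, tc.y * rect.height);
    for (const cv::Point2f& p : pts)
        subdiv.insert(p);

    std::vector<cv::Vec6f> tris;
    subdiv.getTriangleList(tris);

    // map each triangle vertex back to its index in the input list
    auto indexOf = [&pts](float x, float y) -> int {
        for (size_t i = 0; i < pts.size(); ++i)
            if (std::hypot(pts[i].x - x, pts[i].y - y) < 0.5f)
                return (int)i;
        return -1; // an outer vertex added by Subdiv2D itself
    };

    std::vector<cv::Vec3i> result;
    for (const cv::Vec6f& t : tris) {
        int a = indexOf(t[0], t[1]);
        int b = indexOf(t[2], t[3]);
        int c = indexOf(t[4], t[5]);
        if (a >= 0 && b >= 0 && c >= 0)                     // skip Subdiv2D's virtual corners
            result.push_back(cv::Vec3i(a, b, c));
    }
    return result;
}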

Example 3: FittingCurve_RANSAC

    //! Fitting a line or curve via RANSAC; constrained under some conditions.
    // \minDataNum:   the minimum number of data points required to fit the model.
    // \closeDataNum: the number of close data values required to assert that the model fits the data well.
    // \iterNum:      the number of iterations performed by the algorithm.
    // \thValue:      a threshold value that determines when a datum fits the model.
    void FittingCurve_RANSAC(const std::vector<cv::Point2d> &pointSet, 
                             const int &termNum, const int &minDataNum, 
                             const int &iterNum, const double &thValue,
                             const int &closeDataNum, cv::Mat &coefs, const cv::Mat &img)
    {
        double startTime = (double)cv::getTickCount();

        std::vector<cv::Point2d> bestPointSet;   //data points from which this model has been estimated
        double bestError = INFINITY;        //the error of this model relative to the data
        double errorSum, minErrorSum;       // guards against the sample count being smaller than closeDataNum
        int errorFlag = 0; 
        
        cv::Mat maybeModel = cv::Mat::zeros(termNum, 1, CV_64F);
        cv::Mat img2 = img.clone();
        
        for(int iter = 0; iter < iterNum; iter++)
        {
            std::vector<cv::Point2d> tempPointSet = pointSet;
            errorSum = 0;
            
            //! Generate a set of maybe_inliers
            std::vector<cv::Point2d> maybeInliersPointSet;
            uint64 seed = iter + 1; 
            cv::RNG rng(seed);
            for(int i = 0; i < minDataNum; i++)
            {
                int n = rng.uniform(0, (int)tempPointSet.size()); // cv::RNG::uniform's upper bound is exclusive
                maybeInliersPointSet.push_back(tempPointSet.at(n));
                tempPointSet.erase(tempPointSet.begin()+n);
                if (tempPointSet.size() == 0)
                    break;
            }//end for
            
            
            //! Fitting maybe_inliers to model by Least Squares Method
            FittingCurve_LS(maybeInliersPointSet, termNum, maybeModel);
            bestPointSet = maybeInliersPointSet; //Random Sampling Model
#if 0
            for(int i=0; i<bestPointSet.size(); i++)
            {
                cv::circle(img2, bestPointSet.at(i), 1, CV_RGB(125, 125, 0));
            }
            std::vector<cv::Point2d> sampledPoints;
            IPMDrawCurve(maybeModel, img2, sampledPoints, CV_RGB(100, 0, 0) );
            //cv::imshow("RANSAC", img2);
            //cv::waitKey();
#endif
            
            
            //! Unlike the linear model, the non-linear case requires computing the distance between a point and a polynomial curve.
            //! Setting the derivative of the distance function to zero, d{l(x)}/d{x} = 0, yields:
            //! 4*a2^2*x^3 + 6*a1*a2*x^2 + 2*(1+2*a2*(a0-y0)+a1^2)*x + 2*(a1*(a0-y0)-x0) = 0
            //! Solve this cubic equation (polynomial of degree three).
            double a0, a1, a2;
            if (termNum == 2) {
                //!Linear RANSAC
                a0 = maybeModel.at<double>(0,0);
                a1 = maybeModel.at<double>(1,0);
                for (int i = 0; i < tempPointSet.size(); i++) {
                    int x = cvRound(tempPointSet.at(i).x);
                    int y = cvRound(tempPointSet.at(i).y);
                    double error = std::pow(std::abs(a1*x - y + a0)/sqrt(a1*a1+1), 2);
                    errorSum += error;
                    if (error < thValue) {
                        bestPointSet.push_back(tempPointSet.at(i));
                    }
                }//end for
            }
            else if (termNum == 3) {
                //!NonLinear RANSAC
                a0 = maybeModel.at<double>(0,0);
                a1 = maybeModel.at<double>(1,0);
                a2 = maybeModel.at<double>(2,0);
                for (int i = 0; i < tempPointSet.size(); i++) {
                    int x0 = cvRound(tempPointSet.at(i).x);
                    int y0 = cvRound(tempPointSet.at(i).y);
                    double error = std::pow(std::abs(y0 - (a0+(a1+a2*x0)*x0)), 2);
                    errorSum += error;
                    //cout << "Outliers Error: " << error << endl;
                    if (error < thValue) {
                        bestPointSet.push_back(tempPointSet.at(i));
                    }
                }//end for
            }
            else {
                std::cerr << "Only fitting line of degree 1 or 2." << std::endl;
            }

            //! First sampling as initialization
            if(iter == 0) minErrorSum = errorSum;
//            cout << "errorSum: " << errorSum << " minError: " << minErrorSum << endl;
            //! Handles the case where the number of maybe-inlier points is smaller than the close-data count
            if(errorSum < minErrorSum)
            {
                minErrorSum = errorSum;
//......... some code omitted here .........
Developer: atpccb, Project: DriveAssist, Lines: 101, Source: FittingCurve.cpp
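
FittingCurve_LS is called above but not shown in the excerpt. A hedged sketch, assuming it fits y = a0 + a1*x + ... + a_{termNum-1}*x^(termNum-1) by least squares, solving the overdetermined Vandermonde system with cv::solve:

// Hypothetical sketch of FittingCurve_LS (not from the original project).
#include <opencv2/core.hpp>
#include <vector>

void FittingCurve_LS(const std::vector<cv::Point2d>& pts, int termNum, cv::Mat& coefs)
{
    cv::Mat A((int)pts.size(), termNum, CV_64F);
    cv::Mat b((int)pts.size(), 1, CV_64F);
    for (int i = 0; i < (int)pts.size(); ++i)
    {
        double xp = 1.0;
        for (int j = 0; j < termNum; ++j)
        {
            A.at<double>(i, j) = xp;   // row: [1, x, x^2, ...]
            xp *= pts[i].x;
        }
        b.at<double>(i, 0) = pts[i].y;
    }
    // solve A*coefs = b in the least-squares sense
    cv::solve(A, b, coefs, cv::DECOMP_SVD);
}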

Example 4: matchImagesAsync

// ----------------------------------------------------------------------------------
void QualityMatcher::matchImagesAsync(cv::Mat imageSrc, cv::Mat imageDst, cv::Mat priorH, MatchingResultCallback cb)
{
  if (m_matchingThread)
  {
    m_matchingThread->join();
  }
  
  // extract one channel for matching -> YUV would be better, but the green channel is good enough
  cv::Mat rgbSrc[4];
  cv::Mat rgbDst[4];
  cv::split(imageSrc, rgbSrc);
  cv::split(imageDst, rgbDst);
    
  m_matchingThread.reset(new std::thread(std::bind(&QualityMatcher::doTheMagic, this, rgbSrc[1], rgbDst[1], priorH.clone(), cb)));
}
Developer: BhaveshKumbhani, Project: VideoPanorama, Lines: 16, Source: slowmatcher.cpp
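
Why rgbSrc[1] is the green channel: OpenCV stores color images in BGR order by default, so cv::split yields blue at index 0, green at index 1, and red at index 2. A minimal sketch:

#include <opencv2/core.hpp>

int main()
{
    cv::Mat bgr(2, 2, CV_8UC3, cv::Scalar(10, 20, 30)); // B=10, G=20, R=30
    cv::Mat ch[3];
    cv::split(bgr, ch);
    CV_Assert(ch[1].at<uchar>(0, 0) == 20); // index 1 holds the green channel
    return 0;
}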

Example 5: testFeatureCollector

void LayoutTest::testFeatureCollector(const cv::Mat & src) const {
	
	rdf::Timer dt;

	// parse xml
	PageXmlParser parser;
	parser.read(mConfig.xmlPath());

	// test loading of label lookup
	LabelManager lm = LabelManager::read(mConfig.featureCachePath());
	qInfo().noquote() << lm.toString();

	// compute super pixels
	SuperPixel sp(src);

	if (!sp.compute())
		qCritical() << "could not compute super pixels!";

	// feed the label lookup
	SuperPixelLabeler spl(sp.getMserBlobs(), Rect(src));
	spl.setLabelManager(lm);

	// set the ground truth
	if (parser.page())
		spl.setRootRegion(parser.page()->rootRegion());

	if (!spl.compute())
		qCritical() << "could not compute SuperPixel labeling!";

	SuperPixelFeature spf(src, spl.set());
	if (!spf.compute())
		qCritical() << "could not compute SuperPixel features!";

	FeatureCollectionManager fcm(spf.features(), spf.set());
	fcm.write(spl.config()->featureFilePath());
	
	FeatureCollectionManager testFcm = FeatureCollectionManager::read(spl.config()->featureFilePath());

	for (int idx = 0; idx < testFcm.collection().size(); idx++) {

		if (testFcm.collection()[idx].label() != fcm.collection()[idx].label())
			qWarning() << "wrong labels!" << testFcm.collection()[idx].label() << "vs" << fcm.collection()[idx].label();
		else
			qInfo() << testFcm.collection()[idx].label() << "is fine...";
	}

	// drawing
	cv::Mat rImg = src.clone();

	// save super pixel image
	//rImg = superPixel.drawSuperPixels(rImg);
	//rImg = tabStops.draw(rImg);
	rImg = spl.draw(rImg);
	rImg = spf.draw(rImg);
	QString dstPath = rdf::Utils::instance().createFilePath(mConfig.outputPath(), "-textlines");
	rdf::Image::save(rImg, dstPath);
	qDebug() << "debug image saved: " << dstPath;

	qDebug() << "image path: " << mConfig.imagePath();

}
Developer: TUWien, Project: ReadFramework, Lines: 61, Source: DebugMarkus.cpp

Example 6: Init

//===========================================================================
void Patch::Init(int t, double a, double b, cv::Mat &W)
{
  assert((W.type() == CV_32F)); _t=t; _a=a; _b=b; _W=W.clone(); return;
}
Developer: Belial2010, Project: leapFaceTracker, Lines: 5, Source: Patch.cpp

Example 7: forward_backward_track

bool Stabilizer::forward_backward_track(const cv::Mat& frame)
{
    cv::Mat previous_frame_gray;
    cv::cvtColor(prevFrame, previous_frame_gray, cv::COLOR_BGR2GRAY);
  
    cv::goodFeaturesToTrack(previous_frame_gray, previousFeatures, 500, 0.01, 5);

    size_t n = previousFeatures.size();
    CV_Assert(n);
    
    // Compute optical flow in selected points.
    std::vector<cv::Point2f> currentFeatures;
    std::vector<uchar> state;
    std::vector<float> error;
    cv::calcOpticalFlowPyrLK(prevFrame, frame, previousFeatures, currentFeatures, state, error);

    float median_error = median<float>(error);

    std::vector<cv::Point2f> good_points;
    std::vector<cv::Point2f> curr_points;
    for (size_t i = 0; i < n; ++i)
    {
        if (state[i] && (error[i] <= median_error))
        {
            good_points.push_back(previousFeatures[i]);
            curr_points.push_back(currentFeatures[i]);
        }
    }

    size_t s = good_points.size();
    CV_Assert(s == curr_points.size());
    
    //Compute backward optical flow
    std::vector<cv::Point2f> backwardPoints;
    std::vector<uchar> backState;
    std::vector<float> backError;

    cv::calcOpticalFlowPyrLK(frame, prevFrame, curr_points, backwardPoints, backState, backError);
    float median_back_error = median<float>(backError);

    CV_Assert(s == backwardPoints.size());
    std::vector<float> diff(s);

    for (size_t i = 0; i < s; ++i)
    {
        diff[i] = cv::norm(good_points[i] - backwardPoints[i]);
        // diff[i] = (good_points[i].x - backwardPoints[i].x) * (good_points[i].x - backwardPoints[i].x) + (good_points[i].y - backwardPoints[i].y) * (good_points[i].y - backwardPoints[i].y);
    }

    for (int i = s - 1; i >= 0; --i)
    {
        if (!backState[i] || (backError[i] > median_back_error) || (diff[i] > 400)) // keep points whose backward error is at or below the median, mirroring the forward filter
        {
            good_points.erase(good_points.begin() + i);
            curr_points.erase(curr_points.begin() + i);
        }
    }

    s = good_points.size();

    // Find points shift.
    std::vector<float> shifts_x(s);
    std::vector<float> shifts_y(s);
    
    for (size_t i = 0; i < s; ++i)
    {
        shifts_x[i] = curr_points[i].x - good_points[i].x;
        shifts_y[i] = curr_points[i].y - good_points[i].y;
    }
    
    // Find median shift.
    cv::Point2f median_shift(median<float>(shifts_x), median<float>(shifts_y));
    xshift.push_back(median_shift.x);
    yshift.push_back(median_shift.y);

    prevFrame = frame.clone(); 
    return true;
}
Developer: levante52, Project: video-stabilization, Lines: 78, Source: stabilizer.cpp
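
The median<T> helper used above is not part of the excerpt. A plausible sketch, assuming it takes the values by copy (std::nth_element reorders its input):

// Hypothetical sketch of the median<T> helper (not shown in the excerpt).
#include <algorithm>
#include <cassert>
#include <vector>

template <typename T>
T median(std::vector<T> v) // takes a copy: nth_element permutes the elements
{
    assert(!v.empty());
    std::nth_element(v.begin(), v.begin() + v.size() / 2, v.end());
    return v[v.size() / 2];
}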

Example 8: reshowPlacement

bool place::reshowPlacement(const std::string &scanName,
                            const std::string &zerosFile,
                            const std::string &doorName,
                            const place::DoorDetector &d,
                            const std::string &preDone) {
  const std::string buildName = scanName.substr(scanName.find("_") - 3, 3);
  const std::string scanNumber = scanName.substr(scanName.find(".") - 3, 3);
  const std::string placementName =
      buildName + "_placement_" + scanNumber + ".dat";

  std::ifstream in(preDone + placementName, std::ios::in | std::ios::binary);
  if (!in.is_open())
    return false;
  if (!FLAGS_reshow)
    return true;

  if (!FLAGS_quietMode)
    std::cout << placementName << std::endl;

  std::vector<cv::Mat> rotatedScans, toTrim;
  std::vector<Eigen::Vector2i> zeroZero;
  place::loadInScans(scanName, zerosFile, toTrim, zeroZero);
  place::trimScans(toTrim, rotatedScans, zeroZero);

  std::vector<std::vector<place::Door>> doors = loadInDoors(doorName, zeroZero);

  int num;
  in.read(reinterpret_cast<char *>(&num), sizeof(num));
  std::vector<place::posInfo> scores(num);
  for (auto &s : scores)
    in.read(reinterpret_cast<char *>(&s), sizeof(place::posInfo));

  int cutOffNum = place::getCutoffIndex(
      placementName, scores, [](const place::posInfo &s) { return s.score; });
  cutOffNum = FLAGS_top > 0 ? FLAGS_top : cutOffNum;

  num = std::min(num, cutOffNum);

  cvNamedWindow("Preview", CV_WINDOW_NORMAL);

  if (!FLAGS_quietMode)
    std::cout << "Showing minima: " << num << std::endl;

  for (int k = 0; k < std::min(num, (int)scores.size());) {
    auto &currentScore = scores[k];

    const cv::Mat &bestScan = rotatedScans[currentScore.rotation];

    const int xOffset = currentScore.x - zeroZero[currentScore.rotation][0];
    const int yOffset = currentScore.y - zeroZero[currentScore.rotation][1];

    cv::Mat_<cv::Vec3b> output = fpColor.clone();

    auto &res = d.getResponse(0);
    for (int i = 0; i < res.outerSize(); ++i)
      for (Eigen::SparseMatrix<char>::InnerIterator it(res, i); it; ++it)
        if (it.value() > 1)
          output(it.row(), it.col()) = cv::Vec3b(0, 255, 0);

    for (int j = 0; j < bestScan.rows; ++j) {
      if (j + yOffset < 0 || j + yOffset >= fpColor.rows)
        continue;
      const uchar *src = bestScan.ptr<uchar>(j);
      for (int i = 0; i < bestScan.cols; ++i) {
        if (i + xOffset < 0 || i + xOffset >= fpColor.cols)
          continue;

        if (src[i] != 255) {
          output(j + yOffset, i + xOffset) = cv::Vec3b(0, 0, 255 - src[i]);
        }
      }
    }

    for (int j = -10; j < 10; ++j)
      for (int i = -10; i < 10; ++i)
        output(j + currentScore.y, i + currentScore.x) = cv::Vec3b(255, 0, 0);

    for (auto &d : doors[currentScore.rotation]) {
      auto color = randomColor();
      for (double x = 0; x < d.w; ++x) {
        Eigen::Vector3i index =
            (d.corner + x * d.xAxis + Eigen::Vector3d(xOffset, yOffset, 0))
                .unaryExpr([](auto v) { return std::round(v); })
                .cast<int>();

        for (int k = -2; k <= 2; ++k) {
          for (int l = -2; l <= 2; ++l) {
            output(index[1] + k, index[0] + l) = color;
          }
        }
      }
    }

    if (!FLAGS_quietMode) {
      std::cout << &currentScore << std::endl;
      std::cout << "% of scan unexplained: "
                << currentScore.scanFP / currentScore.scanPixels
                << "   Index: " << k << std::endl
                << std::endl;
    }
//......... some code omitted here .........
Developer: erikwijmans, Project: WashU_Research, Lines: 101, Source: placeScanHelper.cpp
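
randomColor() used to tint the doors is not shown in the excerpt. A minimal sketch of what it plausibly does, assuming it just draws a random BGR triple from OpenCV's global RNG:

// Hypothetical sketch of randomColor() (not shown in the excerpt).
#include <opencv2/core.hpp>

static cv::Vec3b randomColor()
{
    cv::RNG& rng = cv::theRNG();
    return cv::Vec3b((uchar)rng.uniform(0, 256),   // blue
                     (uchar)rng.uniform(0, 256),   // green
                     (uchar)rng.uniform(0, 256));  // red
}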

Example 9: filter

void OpenCV_Function::filter(){
	g_srcImage=cv::imread("girl-t1.jpg");

	// mean filter (neighborhood averaging)
	cv::blur(g_srcImage,g_resultImage,cv::Size(2,2)); // larger kernel = more blur
	cv::imshow( "blur", g_resultImage );

	// Gaussian filter
	cv::GaussianBlur( g_srcImage, g_resultImage, cv::Size( 99, 99 ), 0, 0 );   // larger kernel = more blur; the kernel dimensions must be positive and odd
	cv::imshow("GaussianBlur", g_resultImage );

	// box filter
	cv::boxFilter(g_srcImage,g_resultImage,-1,cv::Size(5,5));
	cv::imshow("boxFilter", g_resultImage );

	// median filter
	cv::medianBlur(g_srcImage,g_resultImage,9); // the aperture size must be an odd number greater than 1
	cv::imshow("medianBlur", g_resultImage );

	// bilateral filter
	cv::bilateralFilter( g_srcImage, g_resultImage, 25, 25*2, 25/2 );  
	cv::imshow("bilateralFilter", g_resultImage );

	// erode: erodes the image with the local-minimum operator over the pixel neighborhood
	cv::Mat element = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(3,3));  
	cv::erode(g_srcImage,g_resultImage,element,cv::Point(-1,-1),1);
	cv::imshow("erode", g_resultImage );
	// dilate: dilates the image with the local-maximum operator over the pixel neighborhood
	cv::dilate(g_srcImage,g_resultImage,element);
	cv::imshow("dilate", g_resultImage );

	// opening: erosion followed by dilation
	//dst=open(src,element)=dilate(erode(src,element));

	// closing: dilation followed by erosion
	//dst=close(src,element)=erode(dilate(src,element));

	// morphological gradient: the difference between the dilated and the eroded image
	//dst=morph_grad(src,element)=dilate(src,element)-erode(src,element);

	// top hat: the difference between the source image and its opening
	//dst=src-open(src,element);

	// black hat: the difference between the closing and the source image
	//dst=close(src,element)-src;
	cv::morphologyEx(g_srcImage,g_resultImage, cv::MORPH_OPEN, element);  
	cv::imshow( "morphologyEx", g_resultImage );

	// simplest Canny usage: apply it directly to the source image.
	// the smaller of the two thresholds is used for edge linking, the larger controls the initial segments of strong edges; a high-to-low threshold ratio between 2:1 and 3:1 is recommended.
	cv::Mat cannyMat=g_srcImage.clone();
	cv::Canny(cannyMat,cannyMat,3,9);
	cv::imshow( "Canny", cannyMat );
	//----------------------------------------------------------------------------------  
    //  advanced Canny usage: convert to grayscale, denoise, run Canny, then use the resulting edge map as a mask to copy the source onto the output, giving a colored edge image  
    //----------------------------------------------------------------------------------  
	cv::Mat dst,edge,gray; 
	dst.create( g_srcImage.size(), g_srcImage.type() );   // [1] create a matrix (dst) of the same type and size as src
	cv::cvtColor(g_srcImage,gray,CV_BGR2GRAY); // [2] convert the source image to grayscale
    cv::blur( gray, edge, cv::Size(3,3) );  // [3] denoise first with a 3x3 kernel
	cv::Canny(edge,edge,3,9); // [4] run Canny on the denoised image
	dst = cv::Scalar::all(0);   // [5] set all elements of dst to 0
	g_srcImage.copyTo( dst, edge); // [6] use the Canny edge map as a mask to copy the source image g_srcImage into dst
	cv::imshow( "Canny2", dst );
	
	//----------------------------------------------------------------------------------  
    //  sample code calling the Sobel function
    //----------------------------------------------------------------------------------  
	cv::Mat grad_x, grad_y;  
   cv::Mat abs_grad_x, abs_grad_y;  
	// compute the gradient in the X direction
    cv::Sobel( g_srcImage, grad_x, CV_16S, 1, 0, 3, 1, 1, cv::BORDER_DEFAULT );  
    cv::convertScaleAbs( grad_x, abs_grad_x );  
    cv::imshow("Sobel X", abs_grad_x);   
  
    // compute the gradient in the Y direction
    cv::Sobel( g_srcImage, grad_y, CV_16S, 0, 1, 3, 1, 1, cv::BORDER_DEFAULT );  
    cv::convertScaleAbs( grad_y, abs_grad_y );  
    cv::imshow("Sobel Y", abs_grad_y);   
  
    // merge the gradients (approximation)
    addWeighted( abs_grad_x, 0.5, abs_grad_y, 0.5, 0, dst );  
    cv::imshow("Sobel combined", dst);   
  
	//----------------------------------------------------------------------------------  
    // the Laplacian is a second-order differential operator in n-dimensional Euclidean space, defined as the divergence div() of the gradient grad(); for a twice-differentiable real function f, the Laplacian of f is defined accordingly
    //----------------------------------------------------------------------------------  
	cv::Laplacian( g_srcImage, dst, CV_16S, 3, 1, 0, cv::BORDER_DEFAULT );  
	cv::convertScaleAbs( dst, abs_grad_y );  
	cv::imshow("Laplacian", abs_grad_y); 
	

	//----------------------------------------------------------------------------------  
    //  Scharr is usually referred to as a filter rather than an operator; in OpenCV it mainly exists to complement the Sobel operator
    //----------------------------------------------------------------------------------  
	// compute the gradient in the X direction
	cv::Scharr( g_srcImage, grad_x, CV_16S, 1, 0, 1, 0, cv::BORDER_DEFAULT );  
    cv::convertScaleAbs( grad_x, abs_grad_x );  
    cv::imshow("Scharr X", abs_grad_x);   
	// compute the gradient in the Y direction
//......... some code omitted here .........
Developer: ytzhsh, Project: LearnOpenCV, Lines: 101, Source: OpenCV_Function.cpp
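
The comments above describe opening, closing, morphological gradient, top hat, and black hat, but the code only demonstrates MORPH_OPEN. The remaining modes follow the same call pattern; a short sketch using the same 3x3 elliptical element:

#include <opencv2/imgproc.hpp>

void morphologyDemo(const cv::Mat& src)
{
    cv::Mat element = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(3, 3));
    cv::Mat closed, grad, tophat, blackhat;
    cv::morphologyEx(src, closed,   cv::MORPH_CLOSE,    element); // erode(dilate(src))
    cv::morphologyEx(src, grad,     cv::MORPH_GRADIENT, element); // dilate(src) - erode(src)
    cv::morphologyEx(src, tophat,   cv::MORPH_TOPHAT,   element); // src - open(src)
    cv::morphologyEx(src, blackhat, cv::MORPH_BLACKHAT, element); // close(src) - src
}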

Example 10: goal_pipeline

scoredContour goal_pipeline(cv::Mat input, bool suppress_output=false) {
        std::vector< std::vector<cv::Point> > contours;

        cv::Mat contourOut = input.clone();

        cv::findContours(contourOut, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
        std::vector<scoredContour> finalscores;

        if(!suppress_output) { std::cout << "Found " << contours.size() << " contours." << std::endl; }

        unsigned int ctr = 0;
        for(std::vector< std::vector<cv::Point> >::iterator i = contours.begin();
            i != contours.end(); ++i) {
                double area = cv::contourArea(*i);
                double perimeter = cv::arcLength(*i, true);
                cv::Rect bounds = cv::boundingRect(*i);

                const double cvarea_target = (80.0/240.0);
                const double asratio_target = (20.0/12.0);
                const double area_threshold = 1000;

                /* Area Thresholding Test
                 * Only accept contours of a certain total size.
                 */

                if(area < area_threshold) {
                    continue;
                }

				if(!suppress_output) {
					std::cout << std::endl;
			        std::cout << "Contour " << ctr << ": " << std::endl;
			        ctr++;
			        std::cout << "Area: "  << area << std::endl;
			        std::cout << "Perimeter: " << perimeter << std::endl;
				}

                /* Coverage Area Test
                 * Compare particle area vs. bounding-rectangle area.
                 * score = 1 / abs((1/3) - (particle_area / boundrect_area))
                 * The score falls off as the coverage area deviates from 1/3. */
                double cvarea_score = 0;

                double coverage_area = area / bounds.area();
                cvarea_score = scoreDistanceFromTarget(cvarea_target, coverage_area);

                /* Aspect Ratio Test
                 * Computes aspect ratio of detected objects.
                 */

                double tmp = bounds.width;
                double aspect_ratio = tmp / bounds.height;
                double ar_score = scoreDistanceFromTarget(asratio_target, aspect_ratio);

                /* Image Moment Test
                 * Computes image moments and compares it to known values.
                 */

                cv::Moments m = cv::moments(*i);
                double moment_score = scoreDistanceFromTarget(0.28, m.nu02);

                /* Image Orientation Test
                 * Computes angles off-axis or contours.
                 */
                // theta = (1/2)atan2(mu11, mu20-mu02) radians
                // theta ranges from -90 degrees to +90 degrees.
                double theta = (atan2(m.mu11,m.mu20-m.mu02) * 90) / pi;
                double angle_score = (90 - fabs(theta))+10;

				if(!suppress_output) {
		            std::cout << "nu-02: " << m.nu02 << std::endl;
		            std::cout << "CVArea: "  <<  coverage_area << std::endl;
		            std::cout << "AsRatio: " << aspect_ratio << std::endl;
		            std::cout << "Orientation: " << theta << std::endl;
				}

                double total_score = (moment_score + cvarea_score + ar_score + angle_score) / 4;

				if(!suppress_output) {
		            std::cout << "CVArea Score: "  <<  cvarea_score << std::endl;
		            std::cout << "AsRatio Score: " << ar_score << std::endl;
		            std::cout << "Moment Score: " << moment_score << std::endl;
		            std::cout << "Angle Score: " << angle_score << std::endl;
		            std::cout << "Total Score: " << total_score << std::endl;
				}

                finalscores.push_back(std::make_pair(total_score, std::move(*i)));
        }

       if(finalscores.size() > 0) {
			std::sort(finalscores.begin(), finalscores.end(), &scoresort);

			return finalscores.back();
		} else {
			return std::make_pair(0.0, std::vector<cv::Point>());	
		}
}
Developer: stmobo, Project: lib5002, Lines: 97, Source: visproc.cpp
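
scoreDistanceFromTarget is not included in the excerpt. Going by the comment score = 1 / abs((1/3) - (particle_area / boundrect_area)), a plausible sketch (the epsilon guard against division by zero is an assumption):

// Hypothetical sketch of scoreDistanceFromTarget, inferred from the comment above.
#include <cmath>

double scoreDistanceFromTarget(double target, double value)
{
    const double eps = 1e-9; // avoid division by zero at a perfect match
    return 1.0 / (std::fabs(target - value) + eps);
}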

Example 11:

bool pixkit::labeling::twoPass(const cv::Mat &src,cv::Mat &dst,const int offset){

	//////////////////////////////////////////////////////////////////////////
	///// EXCEPTION
	if(src.type()!=CV_8UC1){
		CV_Error(CV_StsBadArg,"[xxx] src's type should be CV_8UC1.");
	}

	// labels start from 1
	int LableNumber = 1;
	bool **Table = NULL;
	bool *assitT = NULL;
	bool *checkTable = NULL;
	int *refTable = NULL;
	int **result = NULL;
	int *resultT = NULL;

	int C[5],min,temp;
	int W,A,Q,E,S;

	dst = src.clone();
	// convert the Mat into a 2-D int array
	result = new int *[src.rows];
	resultT = new int [src.rows*src.cols];
	memset(resultT,0,src.rows*src.cols*sizeof(int)); // size is in bytes, not elements
	for(int i=0;i<src.rows;i++,resultT+=src.cols)
		result[i] = resultT;

	for(int i=0;i<src.rows;i++)
	{
		int temp = i*src.cols;
		for(int j=0;j<src.cols;j++)
		{
			result[i][j] = (int)src.data[temp+j];
		}
	}


	// labels used for normalization (relabeling)
	std::vector<int> ObjectIndex;

	// first pass
	for(int i=0;i<src.rows;i++)
	{
		for(int j=0;j<src.cols;j++)
		{
			C[0] = result[i][j];
			if (C[0] <128)
				continue;
			/* C[0] >= 128 means this pixel belongs to the foreground */
			min = src.rows*src.cols;

			if(j-1 <0)
				C[1] = 0;
			else
				C[1] = result[i][j-1];
			if (i-1<0 || j-1 <0)
				C[2] = 0;
			else
				C[2] = result[i-1][j-1];
			if(i-1<0)
				C[3] = 0;
			else
				C[3] = result[i-1][j];
			if (i-1<0 || j+1 >=src.cols)
				C[4] = 0;
			else
				C[4] = result[i-1][j+1];


			if(C[1] ==0 && C[2] ==0 && C[3] ==0 && C[4] ==0)
			{
				C[0] = LableNumber;
				LableNumber++;
			}
			else
			{
				for(int k=1;k<=4;k++)
				{
					if(C[k]<min && C[k] != 0)
						min = C[k];
				}
				C[0] = min;
			}
			result[i][j] = C[0];
		}
	}


	// LableNumber != 1 means at least one foreground object exists
	// LableNumber == 1 means there is no foreground object
	// allocate the equivalence-table memory dynamically
	if(LableNumber != 1)
	{
		Table = new bool *[LableNumber];
		assitT = new bool [LableNumber*LableNumber];
		memset(assitT,0,LableNumber*LableNumber*sizeof(bool));
		refTable = new int [LableNumber];
		checkTable = new bool [LableNumber];
		for(int i=0;i<LableNumber;i++,assitT+=LableNumber)
//......... some code omitted here .........
Developer: B9907012, Project: pixkit, Lines: 101, Source: labeling.cpp
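
The excerpt cuts off before the equivalence resolution and the second (relabeling) pass. For reference, OpenCV ships the complete two-pass connected-component labeling as cv::connectedComponents; a minimal sketch producing an equivalent labeling:

// Sketch: the same binary labeling via OpenCV's built-in connectedComponents.
#include <opencv2/imgproc.hpp>

void labelDemo(const cv::Mat& src /* CV_8UC1 */, cv::Mat& labels)
{
    cv::Mat bin;
    cv::threshold(src, bin, 127, 255, cv::THRESH_BINARY);     // foreground = pixels >= 128
    int n = cv::connectedComponents(bin, labels, 8, CV_32S);  // label 0 is the background
    (void)n; // n = number of labels, including the background
}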

Example 12: LoadImage

void SolutionViewer::LoadImage(cv::Mat& img)
{
	input_img = img.clone();
	image_loaded_flag = true;
}
Developer: Jigarsenjallia, Project: Sudoku-GUI, Lines: 5, Source: SolutionViewer.cpp

Example 13:

WeightedGaussian::WeightedGaussian(double weight, const cv::Mat &mean, const cv::Mat &covariance)
    : _Weight(weight), _Mean(mean.clone()), _Covariance(covariance.clone())
{
    
}
Developer: CS598TeamAwesome, Project: Project2-LocalDescriptorAndBagOfFeature, Lines: 5, Source: GMM.cpp
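
Cloning the mean and covariance in the initializer list keeps the component independent of the caller's buffers. For illustration only, a hedged sketch of how such a component might evaluate its weighted Gaussian density (a free function, not part of the original class; assumes a CV_64F column-vector mean and covariance):

// Hypothetical sketch (not from the original project).
#include <opencv2/core.hpp>
#include <cmath>

double weightedDensity(double weight, const cv::Mat& mean,
                       const cv::Mat& cov, const cv::Mat& x)
{
    const int d = mean.rows;
    cv::Mat diff = x - mean;
    cv::Mat covInv;
    cv::invert(cov, covInv, cv::DECOMP_SVD);
    cv::Mat m = diff.t() * covInv * diff;    // 1x1: squared Mahalanobis distance
    double norm = std::pow(2.0 * CV_PI, -0.5 * d) / std::sqrt(cv::determinant(cov));
    return weight * norm * std::exp(-0.5 * m.at<double>(0, 0));
}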

Example 14: normalizePoints

void FSolver::normalizePoints(const cv::Mat &P, cv::Mat &Q) const
{
  // turn P into homogeneous coords first
  
  if(P.rows == 3) // P is 3xN
  {
    if(P.type() == CV_64F)
      Q = P.clone();
    else
      P.convertTo(Q, CV_64F);
    
    cv::Mat aux = Q.row(0);
    cv::divide(aux, Q.row(2), aux);
    
    aux = Q.row(1);
    cv::divide(aux, Q.row(2), aux);
    
  }
  else if(P.rows == 2) // P is 2xN
  {
    const int N = P.cols;
    
    Q.create(3, N, CV_64F);
    Q.row(2) = 1;
    if(P.type() == CV_64F)
    {
      Q.rowRange(0,2) = P * 1;
    }
    else
    {
      cv::Mat aux = Q.rowRange(0,2);
      P.convertTo(aux, CV_64F);
    }
  }
  else if(P.cols == 3) // P is Nx3
  {
    if(P.type() == CV_64F)
      Q = P.t();
    else
    {
      P.convertTo(Q, CV_64F);
      Q = Q.t();
    } 
    
    cv::Mat aux = Q.row(0);
    cv::divide(aux, Q.row(2), aux);
    
    aux = Q.row(1);
    cv::divide(aux, Q.row(2), aux);
  }
  else if(P.cols == 2) // P is Nx2
  {
    const int N = P.rows;
    
    Q.create(N, 3, CV_64F);
    Q.col(2) = 1;
    cv::Mat aux;
    if(P.type() == CV_64F)
    {
      aux = Q.rowRange(0,2);
      P.rowRange(0,2).copyTo(aux);
    }
    else
    {
      aux = Q.colRange(0,2);
      P.convertTo(aux, CV_64F);
    }
    
    Q = Q.t();
  }  
}
Developer: liz-murphy, Project: rslam_dev, Lines: 71, Source: FSolver.cpp
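
A usage sketch for the layouts handled above (assuming FSolver is default-constructible, which is not shown in the excerpt). Either input layout ends up as a 3xN CV_64F homogeneous matrix whose third row is normalized to 1:

cv::Mat p2xN = (cv::Mat_<float>(2, 3) << 1, 2, 3,
                                         4, 5, 6);  // three points in 2xN layout
cv::Mat pNx3 = (cv::Mat_<double>(1, 3) << 2, 4, 2); // one point (2,4,2) in Nx3 layout

FSolver solver;
cv::Mat q1, q2;
solver.normalizePoints(p2xN, q1); // q1: 3x3 CV_64F, third row all ones
solver.normalizePoints(pNx3, q2); // q2: 3x1 CV_64F, holding (1, 2, 1)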

Example 15: RGBDOdometry

bool cv::RGBDOdometry( cv::Mat& Rt, const Mat& initRt,
                       const cv::Mat& image0, const cv::Mat& _depth0, const cv::Mat& validMask0,
                       const cv::Mat& image1, const cv::Mat& _depth1, const cv::Mat& validMask1,
                       const cv::Mat& cameraMatrix, float minDepth, float maxDepth, float maxDepthDiff,
                       const std::vector<int>& iterCounts, const std::vector<float>& minGradientMagnitudes,
                       int transformType )
{
    const int sobelSize = 3;
    const double sobelScale = 1./8;

    Mat depth0 = _depth0.clone(),
        depth1 = _depth1.clone();

    // check RGB-D input data
    CV_Assert( !image0.empty() );
    CV_Assert( image0.type() == CV_8UC1 );
    CV_Assert( depth0.type() == CV_32FC1 && depth0.size() == image0.size() );

    CV_Assert( image1.size() == image0.size() );
    CV_Assert( image1.type() == CV_8UC1 );
    CV_Assert( depth1.type() == CV_32FC1 && depth1.size() == image0.size() );

    // check masks
    CV_Assert( validMask0.empty() || (validMask0.type() == CV_8UC1 && validMask0.size() == image0.size()) );
    CV_Assert( validMask1.empty() || (validMask1.type() == CV_8UC1 && validMask1.size() == image0.size()) );

    // check camera params
    CV_Assert( cameraMatrix.type() == CV_32FC1 && cameraMatrix.size() == Size(3,3) );

    // other checks
    CV_Assert( iterCounts.empty() || minGradientMagnitudes.empty() ||
               minGradientMagnitudes.size() == iterCounts.size() );
    CV_Assert( initRt.empty() || (initRt.type()==CV_64FC1 && initRt.size()==Size(4,4) ) );

    vector<int> defaultIterCounts;
    vector<float> defaultMinGradMagnitudes;
    vector<int> const* iterCountsPtr = &iterCounts;
    vector<float> const* minGradientMagnitudesPtr = &minGradientMagnitudes;

    if( iterCounts.empty() || minGradientMagnitudes.empty() )
    {
        defaultIterCounts.resize(4);
        defaultIterCounts[0] = 7;
        defaultIterCounts[1] = 7;
        defaultIterCounts[2] = 7;
        defaultIterCounts[3] = 10;

        defaultMinGradMagnitudes.resize(4);
        defaultMinGradMagnitudes[0] = 12;
        defaultMinGradMagnitudes[1] = 5;
        defaultMinGradMagnitudes[2] = 3;
        defaultMinGradMagnitudes[3] = 1;

        iterCountsPtr = &defaultIterCounts;
        minGradientMagnitudesPtr = &defaultMinGradMagnitudes;
    }

    preprocessDepth( depth0, depth1, validMask0, validMask1, minDepth, maxDepth );

    vector<Mat> pyramidImage0, pyramidDepth0,
                pyramidImage1, pyramidDepth1, pyramid_dI_dx1, pyramid_dI_dy1, pyramidTexturedMask1,
                pyramidCameraMatrix;
    buildPyramids( image0, image1, depth0, depth1, cameraMatrix, sobelSize, sobelScale, *minGradientMagnitudesPtr,
                   pyramidImage0, pyramidDepth0, pyramidImage1, pyramidDepth1,
                   pyramid_dI_dx1, pyramid_dI_dy1, pyramidTexturedMask1, pyramidCameraMatrix );

    Mat resultRt = initRt.empty() ? Mat::eye(4,4,CV_64FC1) : initRt.clone();
    Mat currRt, ksi;
    for( int level = (int)iterCountsPtr->size() - 1; level >= 0; level-- )
    {
        const Mat& levelCameraMatrix = pyramidCameraMatrix[level];

        const Mat& levelImage0 = pyramidImage0[level];
        const Mat& levelDepth0 = pyramidDepth0[level];
        Mat levelCloud0;
        cvtDepth2Cloud( pyramidDepth0[level], levelCloud0, levelCameraMatrix );

        const Mat& levelImage1 = pyramidImage1[level];
        const Mat& levelDepth1 = pyramidDepth1[level];
        const Mat& level_dI_dx1 = pyramid_dI_dx1[level];
        const Mat& level_dI_dy1 = pyramid_dI_dy1[level];

        CV_Assert( level_dI_dx1.type() == CV_16S );
        CV_Assert( level_dI_dy1.type() == CV_16S );

        const double fx = levelCameraMatrix.at<double>(0,0);
        const double fy = levelCameraMatrix.at<double>(1,1);
        const double determinantThreshold = 1e-6;

        Mat corresps( levelImage0.size(), levelImage0.type() );

        // Run transformation search on current level iteratively.
        for( int iter = 0; iter < (*iterCountsPtr)[level]; iter ++ )
        {
            int correspsCount = computeCorresp( levelCameraMatrix, levelCameraMatrix.inv(), resultRt.inv(DECOMP_SVD),
                                                levelDepth0, levelDepth1, pyramidTexturedMask1[level], maxDepthDiff,
                                                corresps );

            if( correspsCount == 0 )
                break;
//......... some code omitted here .........
Developer: MasaMune692, Project: alcexamples, Lines: 101, Source: rgbdodometry.cpp
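
A usage sketch grounded in the signature shown above. The frames, depth maps, and Kinect-style intrinsics are assumptions about the input data, not about the API; empty iterCounts/minGradientMagnitudes select the built-in per-level defaults:

#include <opencv2/core.hpp>
#include <vector>

bool estimateMotion(const cv::Mat& gray0, const cv::Mat& depth0,   // CV_8UC1 / CV_32FC1
                    const cv::Mat& gray1, const cv::Mat& depth1, cv::Mat& Rt)
{
    cv::Mat cameraMatrix = (cv::Mat_<float>(3, 3) << 525.f, 0.f, 319.5f,
                                                     0.f, 525.f, 239.5f,
                                                     0.f, 0.f, 1.f);
    return cv::RGBDOdometry(Rt, cv::Mat(),             // no initial transform
                            gray0, depth0, cv::Mat(),  // masks are optional
                            gray1, depth1, cv::Mat(),
                            cameraMatrix,
                            0.f, 4.f, 0.07f,           // minDepth, maxDepth, maxDepthDiff (m)
                            std::vector<int>(), std::vector<float>(),
                            0 /* transformType; 0 assumed to mean rigid-body motion */);
}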


Note: the cv::Mat::clone method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as Github/MSDocs. The code snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, refer to the corresponding project's license; do not reproduce without permission.