

C# Image.FindCornerSubPix Method Code Examples

This article collects typical usage examples of the C# Image.FindCornerSubPix method. If you are unsure how to call Image.FindCornerSubPix, what its arguments look like, or simply want to see it used in real code, the curated examples below should help. You can also explore further usage examples of the Image class that this method belongs to.


The following shows 10 code examples of the Image.FindCornerSubPix method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code examples.
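
Before working through the examples, here is a minimal, self-contained sketch of the typical call pattern: detect rough chessboard corners first, then refine them in place with FindCornerSubPix. This is only a sketch, assuming the Emgu CV 2.x API used by the examples below; the file name "board.jpg" and the 9x6 pattern size are placeholder assumptions, not values taken from any of the projects.

using System;
using System.Drawing;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;

class FindCornerSubPixSketch
{
    static void Main()
    {
        // Inner-corner count of the chessboard pattern (placeholder value).
        Size patternSize = new Size(9, 6);

        using (Image<Gray, byte> gray = new Image<Gray, byte>("board.jpg"))
        {
            // First detect the rough chessboard corner locations.
            PointF[] corners = CameraCalibration.FindChessboardCorners(
                gray, patternSize, CALIB_CB_TYPE.ADAPTIVE_THRESH);
            if (corners == null) return; // no board found in the image

            // FindCornerSubPix takes an array of point arrays and refines the points in place.
            gray.FindCornerSubPix(
                new PointF[][] { corners },
                new Size(11, 11),               // half size of the search window
                new Size(-1, -1),               // no dead zone in the middle of the window
                new MCvTermCriteria(30, 0.01)); // stop after 30 iterations or when movement < 0.01 px

            Console.WriteLine("Refined " + corners.Length + " corners.");
        }
    }
}

The (11, 11) search window, (-1, -1) dead zone and MCvTermCriteria(30, 0.01) termination criteria mirror the values used in most of the examples below.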

Example 1: GetDisparitiesLK

        public double[] GetDisparitiesLK(Image<Gray, byte> leftImg, Image<Gray, byte> rightImg, PointF[] points, Image<Gray, short> precalcDepthMap, VisualOdometerDisparitiesParams parameters)
        {
            var param = (VisualOdometerDisparitiesParamsLK)parameters;
            var res = new double[points.Count()];

            Size WinSize = param.WinSize;// new Size(80, 80);
            int PyrLevel = param.PyrLevel;// 4;
            MCvTermCriteria PyrLkTerm = param.PyrLkTerm;// new MCvTermCriteria(100, 0.001);

            var status = new Byte[points.Count()];
            var error = new float[points.Count()];
            var rightPoints = new PointF[points.Count()];

            var subCorners = new PointF[1][];
            subCorners[0] = points;
            leftImg.FindCornerSubPix(
                subCorners,
                new Size(11, 11),
                new Size(-1, -1),
                new MCvTermCriteria(30, 0.01));

            var leftCorners = subCorners[0];

            var gpuP = new GpuPyrLKOpticalFlow(WinSize, PyrLevel, 30, false);

            OpticalFlow.PyrLK(
                leftImg,
                rightImg,
                leftCorners,
                WinSize,
                PyrLevel,
                PyrLkTerm,
                out rightPoints,
                out status,
                out error);

            for (int i = 0; i < points.Count(); ++i)
            {
                if (status[i] == 1)
                {
                    var disp = leftCorners[i].X - rightPoints[i].X;
                    if (disp < 0)
                    {
                        res[i] = -1;
                    }
                    else
                    {
                        res[i] = disp;
                    }
                }
                else
                {
                    res[i] = -1;
                }
            }

            return res;
        }
Developer: JimmHub, Project: odoStuff, Lines: 58, Source: VisualOdometer.cs

Example 2: ImageGrabbed

        void ImageGrabbed()
        {
            //let's get a frame from our capture device
            _img = _capture.RetrieveBgrFrame();
            if (_img == null) return;
            _grayFrame = _img.Convert<Gray, Byte>();
            
            //apply chess board detection
            if (_camera.Calibration.CurrentMode == CameraCalibrationMode.SavingFrames)
            {
                _corners = CameraCalibration.FindChessboardCorners(_grayFrame, _patternSize, CALIB_CB_TYPE.ADAPTIVE_THRESH);
                //we use this loop so we can show a colour image rather than a gray: 
                //CameraCalibration.DrawChessboardCorners(Gray_Frame, patternSize, corners);

                if (_corners != null) //chess board found
                {
                    //make measurements more accurate by using FindCornerSubPix
                    _grayFrame.FindCornerSubPix(new PointF[1][] { _corners }, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

                    //if the go button has been pressed start acquiring frames else we will just display the points
                    if (_camera.Calibration.StartFlag)
                    {
                        _frameArrayBuffer[_frameBufferSavepoint] = _grayFrame.Copy(); //store the image
                        _frameBufferSavepoint++;//increase buffer position

                        //check the state of buffer
                        if (_frameBufferSavepoint == _frameArrayBuffer.Length) _camera.Calibration.CurrentMode = CameraCalibrationMode.CalculatingIntrinsics; //buffer full
                    }

                    //draw the results
                    _img.Draw(new CircleF(_corners[0], 3), new Bgr(Color.Yellow), 1);
                    for (int i = 1; i < _corners.Length; i++)
                    {
                        _img.Draw(new LineSegment2DF(_corners[i - 1], _corners[i]), _lineColourArray[i], 2);
                        _img.Draw(new CircleF(_corners[i], 3), new Bgr(Color.Yellow), 1);
                    }
                    //calibrate the delay based on the size of the buffer
                    //if the buffer is small you want a big delay, if big a small delay
                    Thread.Sleep(100);//allow the user to move the board to a different position
                }
                _corners = null;
            }
            if (_camera.Calibration.CurrentMode == CameraCalibrationMode.CalculatingIntrinsics)
            {
                //we can do this in the loop above to increase speed
                for (int k = 0; k < _frameArrayBuffer.Length; k++)
                {

                    _cornersPointsList[k] = CameraCalibration.FindChessboardCorners(_frameArrayBuffer[k], _patternSize, CALIB_CB_TYPE.ADAPTIVE_THRESH);
                    //for accuracy
                    _grayFrame.FindCornerSubPix(_cornersPointsList, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

                    //Fill our objects list with the real world measurements for the intrinsic calculations
                    List<MCvPoint3D32f> objectList = new List<MCvPoint3D32f>();
                    for (int i = 0; i < Height; i++)
                    {
                        for (int j = 0; j < Width; j++)
                        {
                            objectList.Add(new MCvPoint3D32f(j * 20.0F, i * 20.0F, 0.0F));
                        }
                    }
                    _cornersObjectList[k] = objectList.ToArray();
                }
                ExtrinsicCameraParameters[] ex;
                //our error should be as close to 0 as possible
                _camera.Calibration.Error = CameraCalibration.CalibrateCamera(
                    _cornersObjectList, 
                    _cornersPointsList, 
                    _grayFrame.Size, 
                    _camera.Calibration.IntrinsicParameters, 
                    CALIB_TYPE.CV_CALIB_RATIONAL_MODEL, 
                    new MCvTermCriteria(30, 0.1), 
                    out ex);

                //_camera.Calibration.ExtrinsicParameters = ex;

                //set up to allow another calculation
                //SetButtonState(true);
                _camera.Calibration.StartFlag = false;

                //If Emgu.CV.CvEnum.CALIB_TYPE == CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be initialized before calling the function
                //if you use FIX_ASPECT_RATIO and FIX_FOCAL_LENGTH options, these values need to be set in the intrinsic parameters before the CalibrateCamera function is called. Otherwise 0 values are used as default.
                Console.WriteLine("Intrinsic Calculation Error: " + _camera.Calibration.Error); //display the results to the user
                _camera.Calibration.CurrentMode = CameraCalibrationMode.Calibrated;
            }
            if (_camera.Calibration.CurrentMode == CameraCalibrationMode.Calibrated)
            {
                _corners = CameraCalibration.FindChessboardCorners(_grayFrame, _patternSize, CALIB_CB_TYPE.ADAPTIVE_THRESH);
                //we use this loop so we can show a colour image rather than a gray: 
                //CameraCalibration.DrawChessboardCorners(Gray_Frame, patternSize, corners);

                if (_corners != null) //chess board found
                {
                    //make measurements more accurate by using FindCornerSubPix
                    _grayFrame.FindCornerSubPix(new PointF[1][] { _corners }, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

                    //draw the results
                    _img.Draw(new CircleF(_corners[0], 3), new Bgr(Color.Yellow), 1);
                    for (int i = 1; i < _corners.Length; i++)
                    {
//......... part of the code omitted here .........
Developer: cosmo1911, Project: UniMoveStation, Lines: 101, Source: CameraCalibrationService.cs

Example 3: TestChessboardCalibration

        public void TestChessboardCalibration()
        {
            Size patternSize = new Size(6, 6);

            Image<Gray, Byte> chessboardImage = new Image<Gray, byte>("chessBoard.jpg");
            PointF[] corners;
            bool patternFound =
                CameraCalibration.FindChessboardCorners(
                    chessboardImage,
                    patternSize,
                    Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH | Emgu.CV.CvEnum.CALIB_CB_TYPE.NORMALIZE_IMAGE | Emgu.CV.CvEnum.CALIB_CB_TYPE.FILTER_QUADS,
                    out corners);

            chessboardImage.FindCornerSubPix(
                new PointF[][] { corners },
                new Size(10, 10),
                new Size(-1, -1),
                new MCvTermCriteria(0.05));

            CameraCalibration.DrawChessboardCorners(chessboardImage, patternSize, corners, patternFound);
        }
Developer: samuto, Project: UnityOpenCV, Lines: 21, Source: AutoTestVarious.cs

Example 4: _Capture_ImageGrabbed

        /// <summary>
        /// main function processing of the image data
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        void _Capture_ImageGrabbed(object sender, EventArgs e)
        {
            //let's get a frame from our capture device
            img = _Capture.RetrieveBgrFrame();
            Gray_Frame = img.Convert<Gray, Byte>();

            //apply chess board detection
            if (currentMode == Mode.SavingFrames)
            {
                corners = CameraCalibration.FindChessboardCorners(Gray_Frame, patternSize, Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH);
                //we use this loop so we can show a colour image rather than a gray: //CameraCalibration.DrawChessboardCorners(Gray_Frame, patternSize, corners);

                if (corners != null) //chess board found
                {
                    //make measurements more accurate by using FindCornerSubPix
                    Gray_Frame.FindCornerSubPix(new PointF[1][] { corners }, new System.Drawing.Size(11, 11), new System.Drawing.Size(-1, -1), new MCvTermCriteria(30, 0.1));

                    //if the go button has been pressed start acquiring frames else we will just display the points
                    if (start_Flag)
                    {
                        Frame_array_buffer[frame_buffer_savepoint] = Gray_Frame.Copy(); //store the image
                        frame_buffer_savepoint++;//increase buffer position

                        //check the state of buffer
                        if (frame_buffer_savepoint == Frame_array_buffer.Length) currentMode = Mode.Caluculating_Intrinsics; //buffer full
                    }

                    //draw the results
                    img.Draw(new CircleF(corners[0], 3), new Bgr(System.Drawing.Color.Yellow), 1);
                    for (int i = 1; i < corners.Length; i++)
                    {
                        img.Draw(new LineSegment2DF(corners[i - 1], corners[i]), line_colour_array[i], 2);
                        img.Draw(new CircleF(corners[i], 3), new Bgr(System.Drawing.Color.Yellow), 1);
                    }
                    //calibrate the delay based on the size of the buffer
                    //if the buffer is small you want a big delay, if big a small delay
                    Thread.Sleep(100);//allow the user to move the board to a different position
                }
                corners = null;
            }
            if (currentMode == Mode.Caluculating_Intrinsics)
            {
                //we can do this in the loop above to increase speed
                for (int k = 0; k < Frame_array_buffer.Length; k++)
                {

                    corners_points_list[k] = CameraCalibration.FindChessboardCorners(Frame_array_buffer[k], patternSize, Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH);
                    //for accuracy
                    Gray_Frame.FindCornerSubPix(corners_points_list, new System.Drawing.Size(11, 11), new System.Drawing.Size(-1, -1), new MCvTermCriteria(30, 0.1));

                    //Fill our objects list with the real world measurements for the intrinsic calculations
                    List<MCvPoint3D32f> object_list = new List<MCvPoint3D32f>();
                    for (int i = 0; i < height; i++)
                    {
                        for (int j = 0; j < width; j++)
                        {
                            object_list.Add(new MCvPoint3D32f(j * 20.0F, i * 20.0F, 0.0F));
                        }
                    }
                    corners_object_list[k] = object_list.ToArray();
                }

                //our error should be as close to 0 as possible

                double error = CameraCalibration.CalibrateCamera(corners_object_list, corners_points_list, Gray_Frame.Size, IC, Emgu.CV.CvEnum.CALIB_TYPE.CV_CALIB_RATIONAL_MODEL, new MCvTermCriteria(30, 0.1), out EX_Param);
                //If Emgu.CV.CvEnum.CALIB_TYPE == CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be initialized before calling the function
                //if you use FIX_ASPECT_RATIO and FIX_FOCAL_LENGTH options, these values need to be set in the intrinsic parameters before the CalibrateCamera function is called. Otherwise 0 values are used as default.
                System.Windows.Forms.MessageBox.Show("Intrinsic Calculation Error: " + error.ToString(), "Results", MessageBoxButtons.OK, MessageBoxIcon.Information); //display the results to the user
                currentMode = Mode.Calibrated;
                this.Dispatcher.Invoke((Action)(() =>
                {
                    Write_BTN.IsEnabled = true;
                }));
            }
            if (currentMode == Mode.Calibrated)
            {
                //calculate the camera intrinsics
                Matrix<float> Map1, Map2;
                IC.InitUndistortMap(img.Width, img.Height, out Map1, out Map2);

                //remap the image to the particular intrinsics
                //In the current version of EMGU any pixel that is not corrected is set to transparent allowing the original image to be displayed if the same
                //image is mapped backed, in the future this should be controllable through the flag '0'
                Image<Bgr, Byte> temp = img.CopyBlank();
                CvInvoke.cvRemap(img, temp, Map1, Map2, 0, new MCvScalar(0));
                img = temp.Copy();

                //set up to allow another calculation
                SetButtonState(true);
                start_Flag = false;
            }
            Image<Bgr, byte> mainImage = img.Resize(((double)Main_Picturebox.Width / (double)img.Width), Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR);
            Main_Picturebox.Image = mainImage;
        }
Developer: surgical-robots, Project: robot-control-app, Lines: 99, Source: GraphicalView.xaml.cs

Example 5: _Capture_ImageGrabbed

        /// <summary>
        /// main function processing of the image data
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        void _Capture_ImageGrabbed(object sender, EventArgs e)
        {
            //let's get a frame from our capture device
            img = _Capture.RetrieveBgrFrame();
            Gray_Frame = img.Convert<Gray,Byte>();

            //apply chess board detection
            if(currentMode == Mode.SavingFrames)
            {
                corners = CameraCalibration.FindChessboardCorners(Gray_Frame, patternSize, Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH);
                //we use this loop so we can show a colour image rather than a gray: //CameraCalibration.DrawChessboardCorners(Gray_Frame, patternSize, corners);

                if (corners != null) //chess board found
                {
                    //make measurements more accurate by using FindCornerSubPix
                    Gray_Frame.FindCornerSubPix(new PointF[1][] { corners }, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

                    //if the go button has been pressed start acquiring frames else we will just display the points
                    if (start_Flag)
                    {
                        Frame_array_buffer[frame_buffer_savepoint] = Gray_Frame.Copy(); //store the image
                        frame_buffer_savepoint++;//increase buffer position

                        //check the state of buffer
                        if (frame_buffer_savepoint == Frame_array_buffer.Length) currentMode = Mode.Caluculating_Intrinsics; //buffer full
                    }

                    //draw the results
                    img.Draw(new CircleF(corners[0], 3), new Bgr(Color.Yellow), 1);
                    

                    for(int i = 1; i<corners.Length; i++)
                    {
                        img.Draw(new LineSegment2DF(corners[i - 1], corners[i]), line_colour_array[i], 2);
                        img.Draw(new CircleF(corners[i], 3), new Bgr(Color.Yellow), 1);
                        Console.Write("Length corner2Corner "+i+" : "+new LineSegment2DF(corners[i - 1], corners[i]).Length.ToString()+"\n");

                    }
                    

                    //calibrate the delay based on the size of the buffer
                    //if the buffer is small you want a big delay, if big a small delay
                    Thread.Sleep(100);//allow the user to move the board to a different position
                }
                corners = null;
            }           
            if (currentMode == Mode.Caluculating_Intrinsics)
            {
                //we can do this in the loop above to increase speed
                for (int k = 0; k < Frame_array_buffer.Length; k++)
                {
                    
                    corners_points_list[k] = CameraCalibration.FindChessboardCorners(Frame_array_buffer[k], patternSize, Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH);
                    //for accuracy
                    Gray_Frame.FindCornerSubPix(corners_points_list, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

                    //Fill our objects list with the real world measurements for the intrinsic calculations
                    List<MCvPoint3D32f> object_list = new List<MCvPoint3D32f>();
                    for (int i = 0; i < height; i++)
                    {
                        for (int j = 0; j < width; j++)
                        {
                            object_list.Add(new MCvPoint3D32f(j*31.0F, i*31.0F, 0.0F));
                        }
                    }
                    corners_object_list[k] = object_list.ToArray();
                }

                //our error should be as close to 0 as possible

                double error = CameraCalibration.CalibrateCamera(corners_object_list, corners_points_list, Gray_Frame.Size, IC, Emgu.CV.CvEnum.CALIB_TYPE.CV_CALIB_RATIONAL_MODEL, out EX_Param);
                //If Emgu.CV.CvEnum.CALIB_TYPE == CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be initialized before calling the function
                //if you use FIX_ASPECT_RATIO and FIX_FOCAL_LENGTH options, these values need to be set in the intrinsic parameters before the CalibrateCamera function is called. Otherwise 0 values are used as default.
                MessageBox.Show("Intrinsic Calculation Error: " + error.ToString(), "Results", MessageBoxButtons.OK, MessageBoxIcon.Information); //display the results to the user
                currentMode = Mode.Calibrated;
                for (int i1 = 0; i1 < 8; i1++)
                {
                    Console.Write(IC.DistortionCoeffs[i1,0].ToString()+"\n");                    
                }
                for (int i1 = 0; i1 < 3; i1++)
                {
                    for (int j1 = 0; j1 < 3;j1++ )
                        Console.Write(IC.IntrinsicMatrix[i1, j1].ToString()+"\t");
                    Console.Write("\n");
                }
                
                
            }
            if (currentMode == Mode.Calibrated)
            {
                //display the original image
                Sub_PicturBox.Image = img.ToBitmap();
                //calculate the camera intrinsics
                Matrix<float> Map1, Map2;
                IC.InitUndistortMap(img.Width, img.Height, out Map1, out Map2);
//......... part of the code omitted here .........
Developer: petrind, Project: SRTesis2, Lines: 101, Source: FrameShooterCameraCalib.cs

Example 6: FindFeaturesToTrack

		internal PointF[] FindFeaturesToTrack(Image<Gray, Byte> grayImage, List<TrackedFeature> currentlyTrackedFeatures, int skyRegionBottom, int groundRegionTop)
		{
			if (m_MaskImage == null)
			{
				InitializeMaskImage(grayImage);
			}
			PointF[][] foundFeaturesInChannels = null;

			UpdateMaskImage(grayImage.Size, currentlyTrackedFeatures, skyRegionBottom, groundRegionTop);
			foundFeaturesInChannels = GoodFeaturesToTrack(grayImage, this.MaxFeatureCount, this.QualityLevel, this.MinDistance, this.BlockSize, m_MaskImage);

			//using (Image<Gray, Byte> maskImage = CreateMask(grayImage.Size, currentlyTrackedFeatures, skyRegionBottom, groundRegionTop))
			//{
			//    foundFeaturesInChannels = GoodFeaturesToTrack(grayImage, this.MaxFeatureCount, this.QualityLevel, this.MinDistance, this.BlockSize, maskImage);
			//}
			// Next we refine the location of the found features
			grayImage.FindCornerSubPix(foundFeaturesInChannels, new Size(c_WinSize, c_WinSize), new Size(-1, -1), m_SubCornerTerminationCriteria);
			return foundFeaturesInChannels[0];
		}
Developer: Tymolc, Project: drh-visual-odometry, Lines: 19, Source: OpticalFlow.cs

Example 7: FindChessboardCorners

 private static PointF[] FindChessboardCorners(Image<Gray, byte> image, Size patternSize)
 {
     var corners = CameraCalibration.FindChessboardCorners(image, patternSize, CALIB_CB_TYPE.ADAPTIVE_THRESH);
     if (corners != null)
     {
         image.FindCornerSubPix(new[] {corners}, new Size(11, 11), new Size(-1, -1),
             new MCvTermCriteria(30, 0.01));
     }
     return corners;
 }
Developer: zyh329, Project: adas, Lines: 10, Source: Calibration.cs

Example 8: StartCapture

        public async void StartCapture()
        {
            if (!Calibration.Camera1.Design && !Calibration.Camera2.Design)
            {
                _ctsCameraCalibration = new CancellationTokenSource();
                CancellationToken token = _ctsCameraCalibration.Token;

                _capture1.StartTracker(PSMoveTrackerExposure.High);
                _capture2.StartTracker(PSMoveTrackerExposure.High);
                try
                {
                    _captureTask = Task.Run(() =>
                    {
                        while (!token.IsCancellationRequested)
                        {
                            _capture1.UpdateTracker();
                            _capture1.UpdateImage();
                            _capture2.UpdateTracker();
                            _capture2.UpdateImage();
                            #region Frame Acquisition
                            //Acquire the frames or calculate two frames from one camera
                            _frameS1 = _capture1.GetImage();
                            if (_frameS1 == null) return;
                            _grayFrameS1 = _frameS1.Convert<Gray, Byte>();
                            _frameS2 = _capture2.GetImage();
                            if (_frameS2 == null) return;
                            _grayFrameS2 = _frameS2.Convert<Gray, Byte>();
                            #endregion

                            #region Saving Chessboard Corners in Buffer
                            if (Calibration.CurrentMode == CameraCalibrationMode.SavingFrames)
                            {
                                //Find the chessboard in both images
                                _cornersLeft = CameraCalibration.FindChessboardCorners(_grayFrameS1, _patternSize, Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH);
                                _cornersRight = CameraCalibration.FindChessboardCorners(_grayFrameS2, _patternSize, Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH);

                                //we use this loop so we can show a colour image rather than a gray: //CameraCalibration.DrawChessboardCorners(Gray_Frame, patternSize, corners);
                                //we only do this if the chessboard is present in both images
                                if (_cornersLeft != null && _cornersRight != null) //chess board found in one of the frames?
                                {
                                    //make measurements more accurate by using FindCornerSubPix
                                    _grayFrameS1.FindCornerSubPix(new PointF[1][] { _cornersLeft }, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.01));
                                    _grayFrameS2.FindCornerSubPix(new PointF[1][] { _cornersRight }, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.01));

                                    //if the go button has been pressed start acquiring frames else we will just display the points
                                    if (Calibration.StartFlag)
                                    {
                                        //save the calculated points into an array
                                        _cornersPointsLeft[_bufferSavepoint] = _cornersLeft;
                                        _cornersPointsRight[_bufferSavepoint] = _cornersRight;
                                        _bufferSavepoint++;//increase buffer position

                                        //check the state of buffer
                                        if (_bufferSavepoint == Calibration.FrameBufferSize) Calibration.CurrentMode = CameraCalibrationMode.CalculatingIntrinsics; //buffer full

                                        //Show state of Buffer
                                        //UpdateTitle("Form1: Buffer " + buffer_savepoint.ToString() + " of " + buffer_length.ToString());
                                    }

                                    //draw the results
                                    _frameS1.Draw(new CircleF(_cornersLeft[0], 3), new Bgr(Color.Yellow), 1);
                                    _frameS2.Draw(new CircleF(_cornersRight[0], 3), new Bgr(Color.Yellow), 1);
                                    for (int i = 1; i < _cornersLeft.Length; i++)
                                    {
                                        //left
                                        _frameS1.Draw(new LineSegment2DF(_cornersLeft[i - 1], _cornersLeft[i]), _lineColourArray[i], 2);
                                        _frameS1.Draw(new CircleF(_cornersLeft[i], 3), new Bgr(Color.Yellow), 1);
                                        //right
                                        _frameS2.Draw(new LineSegment2DF(_cornersRight[i - 1], _cornersRight[i]), _lineColourArray[i], 2);
                                        _frameS2.Draw(new CircleF(_cornersRight[i], 3), new Bgr(Color.Yellow), 1);
                                    }
                                    //calibrate the delay based on the size of the buffer
                                    //if the buffer is small you want a big delay, if big a small delay
                                    Thread.Sleep(100);//allow the user to move the board to a different position
                                }
                                _cornersLeft = null;
                                _cornersRight = null;
                            }
                            #endregion
                            #region Calculating Stereo Cameras Relationship
                            if (Calibration.CurrentMode == CameraCalibrationMode.CalculatingIntrinsics)
                            {
                                //fill the MCvPoint3D32f with correct measurements
                                for (int k = 0; k < Calibration.FrameBufferSize; k++)
                                {
                                    //Fill our objects list with the real world measurements for the intrinsic calculations
                                    List<MCvPoint3D32f> objectList = new List<MCvPoint3D32f>();
                                    for (int i = 0; i < Height; i++)
                                    {
                                        for (int j = 0; j < Width; j++)
                                        {
                                            objectList.Add(new MCvPoint3D32f(j * Calibration.SquareSizeX, i * Calibration.SquareSizeY, 0.0F));
                                        }
                                    }
                                    _cornersObjectPoints[k] = objectList.ToArray();
                                }
                                //If Emgu.CV.CvEnum.CALIB_TYPE == CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be initialized before calling the function
                                //if you use FIX_ASPECT_RATIO and FIX_FOCAL_LENGTH options, these values need to be set in the intrinsic parameters before the CalibrateCamera function is called. Otherwise 0 values are used as default.
                                ExtrinsicCameraParameters extrinsicCameraParameters;
                                Matrix<double> fundamentalMatrix;
//......... part of the code omitted here .........
Developer: cosmo1911, Project: UniMoveStation, Lines: 101, Source: StereoCameraCalibrationService.cs

Example 9: InitializeFaceTracking

        private void InitializeFaceTracking()
        {
            _faces = new HaarCascade("haarcascade_frontalface_alt_tree.xml");
            frame = _capture.QueryFrame();
            //We convert it to grayscale
            grayFrame = frame.Convert<Gray, Byte>();
            // We detect a face using haar cascade classifiers, we'll work only on face area
            faceDetected = grayFrame.DetectHaarCascade(_faces, 1.1, 1, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));
            if (faceDetected[0].Length == 1)
            {
                trackingArea = new Rectangle(faceDetected[0][0].rect.X, faceDetected[0][0].rect.Y, faceDetected[0][0].rect.Width, faceDetected[0][0].rect.Height);

                // Here we enlarge or restrict the search features area on a smaller or larger face area
                float scalingAreaFactor = 0.6f;
                int trackingAreaWidth = (int)(faceDetected[0][0].rect.Width * scalingAreaFactor);
                int trackingAreaHeight = (int)(faceDetected[0][0].rect.Height * scalingAreaFactor);
                int leftXTrackingCoord = faceDetected[0][0].rect.X - (int)(((faceDetected[0][0].rect.X + trackingAreaWidth) - (faceDetected[0][0].rect.X + faceDetected[0][0].rect.Width)) / 2);
                int leftYTrackingCoord = faceDetected[0][0].rect.Y - (int)(((faceDetected[0][0].rect.Y + trackingAreaHeight) - (faceDetected[0][0].rect.Y + faceDetected[0][0].rect.Height)) / 2);
                trackingArea = new Rectangle(leftXTrackingCoord, leftYTrackingCoord, trackingAreaWidth, trackingAreaHeight);

                // Allocating proper working images
                faceImage = new Image<Bgr, Byte>(trackingArea.Width, trackingArea.Height);
                faceGrayImage = new Image<Gray, Byte>(trackingArea.Width, trackingArea.Height);
                frame.ROI = trackingArea;
                frame.Copy(faceImage, null);
                frame.ROI = Rectangle.Empty;
                faceGrayImage = faceImage.Convert<Gray, Byte>();

                // Detecting good features that will be tracked in following frames
                ActualFeature = faceGrayImage.GoodFeaturesToTrack(400, 0.5d, 5d, 5);
                faceGrayImage.FindCornerSubPix(ActualFeature, new Size(5, 5), new Size(-1, -1), new MCvTermCriteria(25, 1.5d));

                // Features computed on a different coordinate system are shifted to their original location
                for (int i = 0; i < ActualFeature[0].Length; i++)
                {
                    ActualFeature[0][i].X += trackingArea.X;
                    ActualFeature[0][i].Y += trackingArea.Y;
                }

                // Computing convex hull
                using (MemStorage storage = new MemStorage())
                    hull = PointCollection.ConvexHull(ActualFeature[0], storage, Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE).ToArray();

                referenceCentroid = FindCentroid(hull);
            }
        }
Developer: abraxas4, Project: AR-Drone-Project, Lines: 46, Source: Form1.cs

Example 10: TestGoodFeature

        public void TestGoodFeature()
        {
            using (Image<Bgr, Byte> img = new Image<Bgr, Byte>("stuff.jpg"))
            {
                PointF[][] pts = img.GoodFeaturesToTrack(100, 0.1, 10, 5);
                img.FindCornerSubPix(pts, new Size(5, 5), new Size(-1, -1), new MCvTermCriteria(20, 0.0001));

                foreach (PointF p in pts[0])
                    img.Draw(new CircleF(p, 3.0f), new Bgr(255, 0, 0), 1);
            }
        }
Developer: samuto, Project: UnityOpenCV, Lines: 11, Source: AutoTestImage.cs


Note: The Image.FindCornerSubPix method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's License. Do not reproduce this article without permission.