

C# IplImage.Dispose Method Code Examples

This article collects typical usage examples of the OpenCvSharp.IplImage.Dispose method in C#. If you are wondering what IplImage.Dispose does, how to call it, or what real-world uses look like, the hand-picked examples below should help. You can also explore further usage examples of the class it belongs to, OpenCvSharp.IplImage.


Six code examples of the IplImage.Dispose method are shown below, sorted by popularity by default.
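
Before the examples, here is a minimal sketch (the file name "input.png" is an assumption) of the two standard ways IplImage.Dispose is typically invoked with OpenCvSharp 2.x: implicitly through a using statement, or explicitly in a finally block.

    // Minimal sketch (assumed file "input.png"): "using" calls IplImage.Dispose automatically,
    // releasing the unmanaged image buffer when the block exits.
    using (IplImage img = new IplImage("input.png", LoadMode.Color))
    {
        // work with img here
    } // Dispose runs here, even if an exception was thrown inside the block

    // Equivalent explicit pattern when "using" is not convenient:
    IplImage img2 = new IplImage("input.png", LoadMode.Color);
    try
    {
        // work with img2 here
    }
    finally
    {
        img2.Dispose(); // release the unmanaged resources deterministically
    }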

Example 1: ResizeIplTo

        public static float[] ResizeIplTo(IplImage Face, int width, int height)
        {
            // Temporary image with the target size and the same depth / channel count
            IplImage smallerFace =
                new IplImage(new OpenCvSharp.CvSize(width, height),
                                         Face.Depth, Face.NChannels);

            Face.Resize(smallerFace, Interpolation.Linear);

            unsafe
            {
                // Copy the resized pixel data into a float array
                byte* smallFaceData = smallerFace.ImageDataPtr;
                float[] currentFace = new float[width * height * smallerFace.NChannels * BytesPerPixel(Face.Depth)];
                for (int i = 0; i < smallerFace.Height; i++)
                {
                    for (int j = 0; j < smallerFace.Width; j++)
                    {
                        currentFace[i * smallerFace.WidthStep + j] =
                            (float)smallFaceData[i * smallerFace.WidthStep + j];
                    }
                }

                // The temporary image is released explicitly; the caller still owns Face
                smallerFace.Dispose();

                return currentFace;
            }
        }
Developer: dalinhuang, Project: appcollection, Lines: 26, Source: NativeIconExtractor.cs
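
A hypothetical call site for ResizeIplTo (the class name NativeIconExtractor is taken from the source file name and is an assumption): the method disposes only the temporary image it creates internally, so the caller remains responsible for disposing the input image.

    // Sketch: the caller owns "face" and disposes it via "using".
    using (IplImage face = new IplImage("face.png", LoadMode.Color))
    {
        float[] pixels = NativeIconExtractor.ResizeIplTo(face, 64, 64);
        // use pixels ...
    } // face.Dispose() is called here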

Example 2: FindContours

        public FindContours()
        {
            // cvFindContours, cvDrawContours
            // Detect contours in the image and draw the contours at levels from -1 to +1

            const int SIZE = 500;

            using (IplImage img = new IplImage(SIZE, SIZE, BitDepth.U8, 1))
            {
                // Initialize the image
                img.Zero();
                for (int i = 0; i < 6; i++)
                {
                    int dx = (i % 2) * 250 - 30;
                    int dy = (i / 2) * 150;
                    if (i == 0)
                    {
                        for (int j = 0; j <= 10; j++)
                        {
                            double angle = (j + 5) * Cv.PI / 21;
                            CvPoint p1 = new CvPoint(Cv.Round(dx + 100 + j * 10 - 80 * Math.Cos(angle)), Cv.Round(dy + 100 - 90 * Math.Sin(angle)));
                            CvPoint p2 = new CvPoint(Cv.Round(dx + 100 + j * 10 - 30 * Math.Cos(angle)), Cv.Round(dy + 100 - 30 * Math.Sin(angle)));
                            Cv.Line(img, p1, p2, CvColor.White, 1, LineType.AntiAlias, 0);
                        }
                    }
                    Cv.Ellipse(img, new CvPoint(dx + 150, dy + 100), new CvSize(100, 70), 0, 0, 360, CvColor.White, -1, LineType.AntiAlias, 0);
                    Cv.Ellipse(img, new CvPoint(dx + 115, dy + 70), new CvSize(30, 20), 0, 0, 360, CvColor.Black, -1, LineType.AntiAlias, 0);
                    Cv.Ellipse(img, new CvPoint(dx + 185, dy + 70), new CvSize(30, 20), 0, 0, 360, CvColor.Black, -1, LineType.AntiAlias, 0);
                    Cv.Ellipse(img, new CvPoint(dx + 115, dy + 70), new CvSize(15, 15), 0, 0, 360, CvColor.White, -1, LineType.AntiAlias, 0);
                    Cv.Ellipse(img, new CvPoint(dx + 185, dy + 70), new CvSize(15, 15), 0, 0, 360, CvColor.White, -1, LineType.AntiAlias, 0);
                    Cv.Ellipse(img, new CvPoint(dx + 115, dy + 70), new CvSize(5, 5), 0, 0, 360, CvColor.Black, -1, LineType.AntiAlias, 0);
                    Cv.Ellipse(img, new CvPoint(dx + 185, dy + 70), new CvSize(5, 5), 0, 0, 360, CvColor.Black, -1, LineType.AntiAlias, 0);
                    Cv.Ellipse(img, new CvPoint(dx + 150, dy + 100), new CvSize(10, 5), 0, 0, 360, CvColor.Black, -1, LineType.AntiAlias, 0);
                    Cv.Ellipse(img, new CvPoint(dx + 150, dy + 150), new CvSize(40, 10), 0, 0, 360, CvColor.Black, -1, LineType.AntiAlias, 0);
                    Cv.Ellipse(img, new CvPoint(dx + 27, dy + 100), new CvSize(20, 35), 0, 0, 360, CvColor.White, -1, LineType.AntiAlias, 0);
                    Cv.Ellipse(img, new CvPoint(dx + 273, dy + 100), new CvSize(20, 35), 0, 0, 360, CvColor.White, -1, LineType.AntiAlias, 0);
                }

                // Detect contours
                CvSeq<CvPoint> contours;
                CvMemStorage storage = new CvMemStorage();
                // native style
                Cv.FindContours(img, storage, out contours, CvContour.SizeOf, ContourRetrieval.Tree, ContourChain.ApproxSimple);
                contours = Cv.ApproxPoly(contours, CvContour.SizeOf, storage, ApproxPolyMethod.DP, 3, true);
                
                // wrapper style
                //img.FindContours(storage, out contours, ContourRetrieval.Tree, ContourChain.ApproxSimple);
                //contours = contours.ApproxPoly(storage, ApproxPolyMethod.DP, 3, true);

                // Display in windows
                using (CvWindow window_image = new CvWindow("image", img))
                using (CvWindow window_contours = new CvWindow("contours"))
                {
                    CvTrackbarCallback onTrackbar = delegate(int pos)
                    {
                        IplImage cnt_img = new IplImage(SIZE, SIZE, BitDepth.U8, 3);
                        CvSeq<CvPoint> _contours = contours;
                        int levels = pos - 3;
                        if (levels <= 0) // get to the nearest face to make it look more funny
                        {
                            //_contours = _contours.HNext.HNext.HNext;
                        }
                        cnt_img.Zero();
                        Cv.DrawContours(cnt_img, _contours, CvColor.Red, CvColor.Green, levels, 3, LineType.AntiAlias);
                        window_contours.ShowImage(cnt_img);
                        cnt_img.Dispose();
                    };
                    window_contours.CreateTrackbar("levels+3", 3, 7, onTrackbar);
                    onTrackbar(3);

                    Cv.WaitKey();
                }
            }

        }
Developer: neoxeo, Project: opencvsharp, Lines: 75, Source: FindContours.cs
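
One detail worth noting in Example 2: the CvMemStorage created for contour detection is never released. A hedged variant (same OpenCvSharp 2.x types as above, img being the drawing created in the example) wraps it in a using block so its Dispose runs alongside the image's:

    // Sketch: contour storage wrapped in "using" so it is released deterministically.
    using (CvMemStorage storage = new CvMemStorage())
    {
        CvSeq<CvPoint> contours;
        Cv.FindContours(img, storage, out contours, CvContour.SizeOf, ContourRetrieval.Tree, ContourChain.ApproxSimple);
        contours = Cv.ApproxPoly(contours, CvContour.SizeOf, storage, ApproxPolyMethod.DP, 3, true);
        // ... draw and display exactly as in the example above ...
    } // storage (and the sequences allocated from it) is released here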

Example 3: FindSquares4


//......... part of the code is omitted here .........
            IplImage pyr = new IplImage(sz.Width / 2, sz.Height / 2, BitDepth.U8, 3);
            // create empty sequence that will contain points -
            // 4 points per square (the square's vertices)
            CvSeq<CvPoint> squares = new CvSeq<CvPoint>(SeqType.Zero, CvSeq.SizeOf, storage);

            // select the maximum ROI in the image
            // with the width and height divisible by 2
            timg.ROI = new CvRect(0, 0, sz.Width, sz.Height);

            // down-Scale and upscale the image to filter out the noise
            Cv.PyrDown(timg, pyr, CvFilter.Gaussian5x5);
            Cv.PyrUp(pyr, timg, CvFilter.Gaussian5x5);
            IplImage tgray = new IplImage(sz, BitDepth.U8, 1);

            // find squares in every color plane of the image
            for (int c = 0; c < 3; c++)
            {
                // extract the c-th color plane
                timg.COI = c + 1;
                Cv.Copy(timg, tgray, null);

                // try several threshold levels
                for (int l = 0; l < N; l++)
                {
                    // hack: use Canny instead of zero threshold level.
                    // Canny helps to catch squares with gradient shading   
                    if (l == 0)
                    {
                        // apply Canny. Take the upper threshold from slider
                        // and set the lower to 0 (which forces edges merging) 
                        Cv.Canny(tgray, gray, 0, Thresh, ApertureSize.Size5);
                        // dilate canny output to remove potential
                        // holes between edge segments 
                        Cv.Dilate(gray, gray, null, 1);
                    }
                    else
                    {
                        // apply threshold if l!=0:
                        //     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                        Cv.Threshold(tgray, gray, (l + 1) * 255.0 / N, 255, ThresholdType.Binary);
                    }

                    // find contours and store them all as a list
                    CvSeq<CvPoint> contours;
                    Cv.FindContours(gray, storage, out contours, CvContour.SizeOf, ContourRetrieval.List, ContourChain.ApproxSimple, new CvPoint(0, 0));

                    // test each contour
                    while (contours != null)
                    {
                        // approximate contour with accuracy proportional
                        // to the contour perimeter
                        CvSeq<CvPoint> result = Cv.ApproxPoly(contours, CvContour.SizeOf, storage, ApproxPolyMethod.DP, contours.ContourPerimeter() * 0.02, false);
                        // square contours should have 4 vertices after approximation
                        // relatively large area (to filter out noisy contours)
                        // and be convex.
                        // Note: absolute value of an area is used because
                        // area may be positive or negative - in accordance with the
                        // contour orientation
                        if (result.Total == 4 && Math.Abs(result.ContourArea(CvSlice.WholeSeq)) > 1000 && result.CheckContourConvexity())
                        {
                            double s = 0;

                            for (int i = 0; i < 5; i++)
                            {
                                // find minimum Angle between joint
                                // edges (maximum of cosine)
                                if (i >= 2)
                                {
                                    double t = Math.Abs(Angle(result[i].Value, result[i - 2].Value, result[i - 1].Value));
                                    s = s > t ? s : t;
                                }
                            }

                            // if cosines of all angles are small
                            // (all angles are ~90 degrees) then write quadrangle
                            // vertices to resultant sequence 
                            if (s < 0.3)
                            {
                                for (int i = 0; i < 4; i++)
                                {
                                    //Console.WriteLine(result[i]);
                                    squares.Push(result[i].Value);
                                }
                            }
                        }

                        // take the next contour
                        contours = contours.HNext;
                    }
                }
            }

            // release all the temporary images
            gray.Dispose();
            pyr.Dispose();
            tgray.Dispose();
            timg.Dispose();

            return squares.ToArray();
        }
Developer: qxp1011, Project: opencvsharp, Lines: 101, Source: Squares.cs
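
The explicit Dispose calls at the end of FindSquares4 run only if no exception is thrown first. Below is a sketch of an alternative, under the assumption that timg is a working copy of the input image img and that gray is a single-channel scratch image (both are created in the omitted part of the snippet):

    // Sketch: temporaries held by "using" so Dispose runs even if an exception occurs.
    using (IplImage timg = img.Clone())                                           // assumed: working copy of the input
    using (IplImage gray = new IplImage(sz, BitDepth.U8, 1))                      // assumed: scratch image from the omitted code
    using (IplImage pyr = new IplImage(sz.Width / 2, sz.Height / 2, BitDepth.U8, 3))
    using (IplImage tgray = new IplImage(sz, BitDepth.U8, 1))
    {
        // ... the square-detection logic shown above ...
        return squares.ToArray();
    } // all four temporary images are disposed here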

Example 4: Main

        static void Main(string[] args)
        {
            //  The argument to CreateCameraCapture is the camera index (usually starting at 0)
            using (var capture = Cv.CreateCameraCapture(0))
            {
                Console.WriteLine("Hit any key to quit");

                /*
                double fps=12.0;
                int interval=1;
                double zoom=1.0;
                string OutputFile;
                */

                double fps;
                int interval;
                double zoom = 1.0;
                string OutputFile;

                var opts = new Options();
                bool isSuccess = CommandLine.Parser.Default.ParseArguments(args, opts);

                if(!isSuccess)
                {
                    opts.GetUsage();
                    Console.WriteLine(Environment.GetCommandLineArgs()[0] + "  -o Outputfilename(string) -f fps(double) -i CaptureInterval(int)");
                    Environment.Exit(0);
                }

                fps = opts.fps;
                interval = opts.interval;
                zoom = opts.zoom;
                OutputFile = opts.OutputFile;
                Console.WriteLine(OutputFile);
                // Reject clearly invalid settings
                if (fps > 30 || interval < 1)
                {
                    Console.WriteLine(" :-p");
                    Environment.Exit(1);
                }

                Int32 codec = 0; // codec (AVI)
                IplImage frame = new IplImage();

                /*
                double width = capture.FrameWidth/2;
                double height = capture.FrameHeight/2;

                //double width = 640, height = 240;
                Cv.SetCaptureProperty(capture, CaptureProperty.FrameWidth, width);
                Cv.SetCaptureProperty(capture, CaptureProperty.FrameHeight, height);
                CvSize size = new CvSize((int)width, (int)height);
                CvVideoWriter vw = new CvVideoWriter(OutputFile, codec, fps, size, true);
                */

                int width = (int)(Cv.GetCaptureProperty(capture, CaptureProperty.FrameWidth)*zoom);
                int height = (int)(Cv.GetCaptureProperty(capture, CaptureProperty.FrameHeight)*zoom);

                //Cv.SetCaptureProperty(capture, CaptureProperty.FrameWidth, width);
                //Cv.SetCaptureProperty(capture, CaptureProperty.FrameWidth, height);
                //Bitmap bitmap = new Bitmap(width, height);

                CvSize size = new CvSize(width, height);
                CvVideoWriter vw = new CvVideoWriter(OutputFile, codec, fps, size, true);

                //CvFont font = new CvFont(FontFace.HersheyTriplex, 0.7, 0.7);
                //(FontFace.HersheyPlain, 1.0, 1.0, 0, 2);

                double fontSize;
                if (width > 600)
                    fontSize = 1.0;
                else
                    fontSize = 0.5;

                CvFont font = new CvFont(FontFace.HersheyPlain, fontSize, fontSize);

                //  Keep showing the webcam image until any key is pressed
                while (Cv.WaitKey(1) == -1)
                {
                    System.Threading.Thread.Sleep(1000*interval);
                    //  Grab a frame from the camera
                    frame = Cv.QueryFrame(capture);
                    string str = DateTime.Now.ToString();

                    //  Create a window and display the webcam image in it
                    if (frame != null)
                    {
                        frame.PutText(str, new CvPoint(10, 20), font, new CvColor(200,100,50));
                        Cv.ShowImage("Timelapse", frame);
                        //frame.SaveImage("result.bmp");
                       //bitmap = BitmapConverter.ToBitmap(frame);
                        //OpenCvSharp.IplImage ipl2 = (OpenCvSharp.IplImage)BitmapConverter.ToIplImage(bitmap);
                        vw.WriteFrame(frame);
                        // vw.WriteFrame(ipl2);
                        frame.Dispose();
                    }
                }

                Cv.DestroyWindow("Capture");
                vw.Dispose();
            }
//......... part of the code is omitted here .........
Developer: HyperInfo, Project: TimeLaps, Lines: 101, Source: Program.cs
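
In Example 4, vw.Dispose() is reached only when the capture loop exits normally. A hedged variant (same types and variables as in the example) lets a using block finalize the writer even if the loop is interrupted by an exception:

    // Sketch: the video writer wrapped in "using" so Dispose (which finalizes the output file) always runs.
    using (var vw = new CvVideoWriter(OutputFile, codec, fps, size, true))
    {
        while (Cv.WaitKey(1) == -1)
        {
            IplImage frame = Cv.QueryFrame(capture);
            if (frame != null)
            {
                vw.WriteFrame(frame);
            }
        }
    } // vw.Dispose() is called here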

Example 5: Recognize

        /// <summary>
        /// Performs the recognition
        /// </summary>
        /// <param name="imagePath">Path of the image to recognize</param>
        /// <param name="isDebug">Debug mode</param>
        /// <returns></returns>
        public static String Recognize(String imagePath, bool isDebug = false)
        {
            List<String> results = new List<string>();

            // Load the image to be recognized
            IplImage src = new IplImage(imagePath, LoadMode.GrayScale);

            using (IplImage tmpImage = new IplImage(src.Size, BitDepth.U8, 1))
            {
                // 1) Preprocessing before detection

                // Edge enhancement
                src.UnsharpMasking(src, 3);

                // Binarization using Otsu's method
                // Otsu, "An Automatic Threshold Selection Method Based on Discriminant and Least Squares Criteria",
                // IEICE Transactions, Vol. J63-D, No. 4, pp. 349-356, 1980.
                src.Threshold(tmpImage, 200, 250, ThresholdType.Otsu);

                src.Dispose();

                Dictionary<int, List<double>> shapeMatchResults = new Dictionary<int, List<double>>();

                List<string> answerFileNames = washTagDictionary.Keys.ToList();
                foreach (var answerFileName in answerFileNames)
                {
                    var washTagInfo = washTagDictionary[answerFileName];
                    var answerImagePath = String.Format(@"answer\{0}.png", answerFileName);

                    // 2) Detection
                    var resultSURF = SURF(tmpImage, answerImagePath, isDebug);

                    // 3) Evaluate the detection candidates
                    string result = null;

                    // Case 1: corner points were found
                    if (resultSURF.dstCorners != null)
                    {
                        // TODO: planar evaluation
                        //result = fileBaseName + " : " + washTagDictionary[fileBaseName];
                    }

                    // Case 2: shape matching
                    if (result == null && resultSURF.findPointList.Count > 0)
                    {
                        // One side of the ROI is about a quarter of the image width (chosen somewhat arbitrarily)
                        CvSize roiSize = new CvSize(tmpImage.Width / 4, tmpImage.Width / 4);

                        List<double> matchResults = new List<double>();
                        foreach (var findPoint in resultSURF.findPointList)
                        {
                            // Set the ROI
                            tmpImage.SetROI(
                                (int)findPoint.Pt.X - roiSize.Width / 2,
                                (int)findPoint.Pt.Y - roiSize.Height / 2,
                                roiSize.Width, roiSize.Height
                            );
                            // Shape matching with Hu moments [robust to rotation, scaling, and mirroring]
                            matchResults.Add(
                                CompareShapeMoment(tmpImage, answerImagePath, MatchShapesMethod.I1)
                            );
                            // Reset the ROI
                            tmpImage.ResetROI();
                        }

                        // Treat it as a detection when the best match is below the threshold
                        if (matchResults.Min() < 0.005)
                        {
                            // Create a list for this category if it does not exist yet
                            if (shapeMatchResults.ContainsKey(washTagInfo.CategoryNo) == false)
                            {
                                shapeMatchResults.Add(washTagInfo.CategoryNo, new List<double>());
                            }

                            shapeMatchResults[washTagInfo.CategoryNo].Add(matchResults.Min());
                        }
                    }
                }

                // 4) Organize the recognition results
                foreach (var categoryNo in shapeMatchResults.Keys)
                {
                    var matchResult = shapeMatchResults[categoryNo];

                    var min = matchResult.Min();
                    var index = matchResult.FindIndex((x) =>
                    {
                        return x == min;
                    });

                    var id = String.Format("{0:0}{1:00}", categoryNo, index + 1);
                    var recognitionWashTag = washTagDictionary[id];

                    // Store the result
                    results.Add(
//......... part of the code is omitted here .........
Developer: Gakuto, Project: ImageRecognizationTest, Lines: 101, Source: WashTagRecognize.cs

Example 6: GetTrainingExample

 public static IplImage GetTrainingExample(System.Drawing.Size size, string fileName)
 {
     // Load the source image and resize it to the requested training-example size
     IplImage src = new IplImage(fileName);
     IplImage dst = new IplImage(new CvSize(size.Width, size.Height), src.Depth, src.NChannels);
     Cv.Resize(src, dst, Interpolation.Linear);
     // The source is no longer needed; the caller owns (and must dispose) the returned dst
     src.Dispose();
     return dst;
 }
Developer: pyephyomaung, Project: som-ar, Lines: 8, Source: OpenCVSharpHelper.cs
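
A hypothetical call site for GetTrainingExample (the class name OpenCVSharpHelper is inferred from the source file name): the returned IplImage is owned by the caller, who should dispose it when done.

    // Sketch: the caller disposes the image returned by GetTrainingExample.
    using (IplImage sample = OpenCVSharpHelper.GetTrainingExample(new System.Drawing.Size(64, 64), "sample.png"))
    {
        // feed the resized training example to the learning code here
    } // sample.Dispose() is called here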


Note: The OpenCvSharp.IplImage.Dispose examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. Please follow the corresponding project's license when distributing or using the code; do not reproduce without permission.