本文整理汇总了C#中OpenCvSharp.CvMemStorage类的典型用法代码示例。如果您正苦于以下问题:C# CvMemStorage类的具体用法?C# CvMemStorage怎么用?C# CvMemStorage使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
CvMemStorage类属于OpenCvSharp命名空间,在下文中一共展示了CvMemStorage类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: LatentSVM
public LatentSVM()
{
    // Load the latent-SVM detector model and a test image, run detection,
    // outline every hit in red and display the result.
    using (var detector = new CvLatentSvmDetector(FilePath.Text.LatentSvmCat))
    using (var imageSrc = new IplImage(FilePath.Image.Cat, LoadMode.Color))
    using (var imageDst = imageSrc.Clone())
    using (var storage = new CvMemStorage())
    {
        Console.WriteLine("Running LatentSVM...");
        var stopwatch = Stopwatch.StartNew();
        CvSeq<CvObjectDetection> detections = detector.DetectObjects(imageSrc, storage, 0.5f, 2);
        stopwatch.Stop();
        Console.WriteLine("Elapsed time: {0}ms", stopwatch.ElapsedMilliseconds);

        // Draw a red rectangle around each detection on the output copy.
        foreach (CvObjectDetection detection in detections)
        {
            CvRect box = detection.Rect;
            CvPoint topLeft = new CvPoint(box.X, box.Y);
            CvPoint bottomRight = new CvPoint(box.X + box.Width, box.Y + box.Height);
            imageDst.Rectangle(topLeft, bottomRight, CvColor.Red, 3);
        }

        using (new CvWindow("LatentSVM result", imageDst))
        {
            Cv.WaitKey();
        }
    }
}
示例2: Squares
public Squares()
{
    // Memory storage that will contain all the dynamic data produced by
    // FindSquares4. Wrapped in `using` so the native buffers are released
    // deterministically (the original leaked the storage).
    using (CvMemStorage storage = new CvMemStorage(0))
    {
        for (int i = 0; i < _names.Length; i++)
        {
            // load i-th image
            using (IplImage img = new IplImage(_names[i], LoadMode.Color))
            {
                // create window and a trackbar (slider) with parent "image" and set callback
                // (the slider regulates upper threshold, passed to Canny edge detector)
                Cv.NamedWindow(WindowName, WindowMode.AutoSize);
                // find and draw the squares
                DrawSquares(img, FindSquares4(img, storage));
            }
            // clear memory storage - reset free space position
            storage.Clear();
            // wait for key; cvWaitKey also takes care of event processing
            int c = Cv.WaitKey(0);
            if ((char)c == 27) // ESC aborts the slideshow
                break;
        }
        Cv.DestroyWindow(WindowName);
    }
}
示例3: DetectFace
/// <summary>
/// Detects faces in an image file, draws a red rectangle around each face
/// and saves every face region to a separate bitmap file.
/// </summary>
/// <param name="file_name">Path of the image to scan.</param>
/// <param name="read_count">Sequence number used to build the output file names.</param>
private void DetectFace(String file_name,int read_count)
{
    // Load the Haar cascade features and allocate native scratch memory.
    // Both wrap native resources, so dispose them deterministically
    // (the original leaked the cascade and the storage).
    using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile(@"C:\opencv2.4.8\sources\data\haarcascades\haarcascade_frontalface_alt.xml"))
    using (CvMemStorage strage = new CvMemStorage(0))
    using (IplImage img = new IplImage(file_name))
    using (IplImage gray_image = Cv.CreateImage(new CvSize(img.Width, img.Height), BitDepth.U8, 1))
    {
        // Convert to grayscale - the detector works on single-channel images.
        Cv.CvtColor(img, gray_image, ColorConversion.BgrToGray);
        // Rectangles of the detected faces.
        var result = Cv.HaarDetectObjects(gray_image, cascade, strage);
        for (int i = 0; i < result.Total; i++)
        {
            // Draw the bounding rectangle on the original image.
            CvRect rect = result[i].Value.Rect;
            Cv.Rectangle(img, rect, new CvColor(255, 0, 0));
            // Restrict the ROI to the face region and write it to a file.
            img.ROI = rect;
            string out_name = this.OutputFoldaName + @"\out" + read_count + @"_" + i + @".bmp";
            Cv.SaveImage(out_name, img);
        }
    }
}
示例4: SeqPartition
/// <summary>
/// Scatters random points on a canvas, stores them in a CvSeq backed by a
/// CvMemStorage, and opens a window with a threshold trackbar whose callback
/// (OnTrack) re-partitions the points.
/// </summary>
public SeqPartition()
{
    // NOTE(review): the storage backs pointSeq for the object's lifetime,
    // so it is intentionally not disposed here.
    CvMemStorage storage = new CvMemStorage(0);
    pointSeq = new CvSeq<CvPoint>(SeqType.EltypeS32C2, CvSeq.SizeOf, storage);
    Random random = new Random();
    canvas = new IplImage(Width, Height, BitDepth.U8, 3);
    colors = new CvScalar[Count];
    for (int i = 0; i < Count; i++)
    {
        pointSeq.Push(new CvPoint
        {
            X = random.Next(Width),
            Y = random.Next(Height)
        });
        // OR-ing 0x00404040 biases every channel upward so the random
        // colors stay visible against the black canvas.
        int bits = random.Next() | 0x00404040;
        colors[i] = Cv.RGB(bits & 255, (bits >> 8) & 255, (bits >> 16) & 255);
    }
    using (window = new CvWindowEx() { Text = "points" })
    {
        window.CreateTrackbar("threshold", 10, 50, OnTrack);
        OnTrack(10);
        CvWindowEx.WaitKey();
    }
}
示例5: HoughCircles
public HoughCircles()
{
    // Detect circles with the Hough gradient method and display the result.
    using (IplImage imgSrc = new IplImage(Const.ImageWalkman, LoadMode.Color))
    using (IplImage imgGray = new IplImage(imgSrc.Size, BitDepth.U8, 1))
    using (IplImage imgHough = imgSrc.Clone())
    {
        // Grayscale + Gaussian blur suppress noise that would produce
        // spurious circle candidates.
        Cv.CvtColor(imgSrc, imgGray, ColorConversion.BgrToGray);
        Cv.Smooth(imgGray, imgGray, SmoothType.Gaussian, 9);
        //Cv.Canny(imgGray, imgGray, 75, 150, ApertureSize.Size3);
        using (CvMemStorage storage = new CvMemStorage())
        {
            CvSeq<CvCircleSegment> circles = imgGray.HoughCircles(storage, HoughCirclesMethod.Gradient, 1, 100, 150, 55, 0, 0);
            foreach (CvCircleSegment circle in circles)
            {
                imgHough.Circle(circle.Center, (int)circle.Radius, CvColor.Red, 3);
            }
        }
        // Open windows showing the intermediate and final images.
        using (new CvWindow("gray", WindowMode.AutoSize, imgGray))
        using (new CvWindow("Hough circles", WindowMode.AutoSize, imgHough))
        {
            CvWindow.WaitKey(0);
        }
    }
}
示例6: FindMostLengthHole
/// <summary>
/// Finds the contours in the image and returns the longest one that is a
/// hole boundary.
/// </summary>
/// <param name="image">Image in which to search for contours.</param>
/// <returns>The points of the longest hole contour, or an empty array if none was found.</returns>
public CvPoint[] FindMostLengthHole(IplImage image)
{
    // The storage only needs to live until ToArray() copies the points out,
    // so it can safely be disposed here (the original leaked it).
    using (CvMemStorage contours = new CvMemStorage())
    {
        CvSeq<CvPoint> firstContour;
        CvSeq<CvPoint> mostLengthContour = null;
        double maxContourLength = 0;
        // Separate the object from the background.
        separateBackground(image, tmpImg);
        // Find all contours in the image.
        Cv.FindContours(tmpImg, contours, out firstContour, CvContour.SizeOf, ContourRetrieval.List, ContourChain.ApproxNone);
        // No contours found at all.
        if (firstContour == null) return new CvPoint[0];
        // Walk the whole contour list. The original loop tested
        // `currentContour.HNext != null` and therefore never examined the
        // last contour in the list; testing the current node fixes that.
        for (CvSeq<CvPoint> currentContour = firstContour; currentContour != null; currentContour = currentContour.HNext)
        {
            if (isHole(currentContour))
            {
                double perim = Cv.ContourPerimeter(currentContour);
                if (perim >= maxContourLength)
                {
                    maxContourLength = perim;
                    mostLengthContour = currentContour;
                }
            }
        }
        // No hole boundary among the contours.
        if (mostLengthContour == null) return new CvPoint[0];
        return mostLengthContour.ToArray();
    }
}
示例7: FaceDetect
public FaceDetect()
{
    CheckMemoryLeak();
    // CvHaarClassifierCascade, cvHaarDetectObjects
    // Palette cycled through when circling each detected face.
    CvColor[] palette =
    {
        new CvColor(0,0,255),
        new CvColor(0,128,255),
        new CvColor(0,255,255),
        new CvColor(0,255,0),
        new CvColor(255,128,0),
        new CvColor(255,255,0),
        new CvColor(255,0,0),
        new CvColor(255,0,255),
    };
    const double Scale = 1.14;
    const double ScaleFactor = 1.0850;
    const int MinNeighbors = 2;

    using (IplImage img = new IplImage(FilePath.Image.Yalta, LoadMode.Color))
    using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / Scale), Cv.Round(img.Height / Scale)), BitDepth.U8, 1))
    {
        // Build a downscaled, histogram-equalized grayscale copy for detection.
        using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
        {
            Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
            Cv.Resize(gray, smallImg, Interpolation.Linear);
            Cv.EqualizeHist(smallImg, smallImg);
        }

        using (var cascade = CvHaarClassifierCascade.FromFile(FilePath.Text.HaarCascade))
        using (var storage = new CvMemStorage())
        {
            storage.Clear();
            // Detect faces and time the call.
            Stopwatch timer = Stopwatch.StartNew();
            CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));
            timer.Stop();
            Console.WriteLine("detection time = {0}ms\n", timer.ElapsedMilliseconds);

            // Circle each detection, mapping coordinates back to full size.
            for (int i = 0; i < faces.Total; i++)
            {
                CvRect rect = faces[i].Value.Rect;
                CvPoint center = new CvPoint
                {
                    X = Cv.Round((rect.X + rect.Width * 0.5) * Scale),
                    Y = Cv.Round((rect.Y + rect.Height * 0.5) * Scale)
                };
                int radius = Cv.Round((rect.Width + rect.Height) * 0.25 * Scale);
                img.Circle(center, radius, palette[i % 8], 3, LineType.AntiAlias, 0);
            }
        }
        // Show the annotated image in a window.
        CvWindow.ShowImages(img);
    }
}
示例8: Contour
public Contour()
{
    // cvContourArea, cvArcLength
    // Computes the area enclosed by a contour and the contour's length.
    const int SIZE = 500;

    // (1) Allocate and clear the canvas.
    using (CvMemStorage storage = new CvMemStorage())
    using (IplImage img = new IplImage(SIZE, SIZE, BitDepth.U8, 3))
    {
        img.Zero();

        // (2) Generate a closed polyline: 20 points on a circle, each with
        // a random radial scale in [0.5, 1.5).
        CvSeq<CvPoint> points = new CvSeq<CvPoint>(SeqType.PolyLine, storage);
        CvRNG rng = new CvRNG((ulong)DateTime.Now.Ticks);
        double scale = rng.RandReal() + 0.5;
        CvPoint prev = new CvPoint
        {
            X = (int)(Math.Cos(0) * SIZE / 4 * scale + SIZE / 2),
            Y = (int)(Math.Sin(0) * SIZE / 4 * scale + SIZE / 2)
        };
        img.Circle(prev, 2, CvColor.Green);
        points.Push(prev);
        for (int i = 1; i < 20; i++)
        {
            scale = rng.RandReal() + 0.5;
            CvPoint next = new CvPoint
            {
                X = (int)(Math.Cos(i * 2 * Math.PI / 20) * SIZE / 4 * scale + SIZE / 2),
                Y = (int)(Math.Sin(i * 2 * Math.PI / 20) * SIZE / 4 * scale + SIZE / 2)
            };
            img.Line(prev, next, CvColor.Green, 2);
            prev.X = next.X;
            prev.Y = next.Y;
            img.Circle(prev, 3, CvColor.Green, Cv.FILLED);
            points.Push(prev);
        }
        // Close the polyline back to the first vertex.
        img.Line(prev, points.GetSeqElem(0).Value, CvColor.Green, 2);

        // (3) Bounding rectangle, area and arc length of the contour.
        CvRect rect = points.BoundingRect(false);
        double area = points.ContourArea();
        double length = points.ArcLength(CvSlice.WholeSeq, 1);

        // (4) Draw the bounding box and print the measurements on the image.
        img.Rectangle(new CvPoint(rect.X, rect.Y), new CvPoint(rect.X + rect.Width, rect.Y + rect.Height), CvColor.Red, 2);
        string text_area = string.Format("Area: wrect={0}, contour={1}", rect.Width * rect.Height, area);
        string text_length = string.Format("Length: rect={0}, contour={1}", 2 * (rect.Width + rect.Height), length);
        using (CvFont font = new CvFont(FontFace.HersheySimplex, 0.7, 0.7, 0, 1, LineType.AntiAlias))
        {
            img.PutText(text_area, new CvPoint(10, img.Height - 30), font, CvColor.White);
            img.PutText(text_length, new CvPoint(10, img.Height - 10), font, CvColor.White);
        }

        // (5) Show the image until a key is pressed.
        using (CvWindow window = new CvWindow("BoundingRect", WindowMode.AutoSize))
        {
            window.Image = img;
            CvWindow.WaitKey(0);
        }
    }
}
示例9: FaceDetect
/// <summary>
/// Detects faces with a cascade of Haar classifiers, covers each detected
/// face with a filled black circle, and returns the censored image as a Bitmap.
/// A clone of the result is also kept in the FindFace field.
/// </summary>
/// <param name="src">Source image to scan (not modified).</param>
/// <returns>24bpp RGB bitmap with every detected face blacked out.</returns>
public System.Drawing.Bitmap FaceDetect(IplImage src)
{
    // CvHaarClassifierCascade, cvHaarDetectObjects
    // (The original declared an 8-entry color palette that was never used -
    // the circles are always drawn in plain black - so it was removed.)
    const double scale = 1.04;
    const double scaleFactor = 1.139;
    const int minNeighbors = 1;
    using (IplImage img = src.Clone())
    using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / scale), Cv.Round(img.Height / scale)), BitDepth.U8, 1))
    {
        // Build a downscaled, histogram-equalized grayscale copy for detection.
        using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
        {
            Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
            Cv.Resize(gray, smallImg, Interpolation.Linear);
            Cv.EqualizeHist(smallImg, smallImg);
        }
        using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile(Environment.CurrentDirectory + "\\" + "haarcascade_frontalface_alt.xml"))
        using (CvMemStorage storage = new CvMemStorage())
        {
            storage.Clear();
            // Detect the faces.
            CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, scaleFactor, minNeighbors, 0, new CvSize(20, 20));
            // Cover each detected face with a filled black circle.
            for (int i = 0; i < faces.Total; i++)
            {
                CvRect r = faces[i].Value.Rect;
                CvPoint center = new CvPoint
                {
                    X = Cv.Round((r.X + r.Width * 0.5) * scale),
                    Y = Cv.Round((r.Y + r.Height * 0.5) * scale)
                };
                int radius = Cv.Round((r.Width + r.Height) * 0.25 * scale);
                img.Circle(center, radius, new CvColor(0, 0, 0), -1, LineType.Link8, 0);
            }
        }
        FindFace = img.Clone();
        // Convert the IplImage to a Bitmap and return it.
        return FindFace.ToBitmap(System.Drawing.Imaging.PixelFormat.Format24bppRgb);
    }
}
示例10: CvSet
/// <summary>
/// 空のセットを生成する
/// </summary>
/// <param name="setFlags">生成するセットのタイプ. </param>
/// <param name="headerSize">セットのヘッダのサイズ(sizeof(CvSet)以上). </param>
/// <param name="elemSize">セットの要素のサイズ(CvSetElem 以上). </param>
/// <param name="storage">セットのためのコンテナ. </param>
#else
/// <summary>
/// Creates empty set
/// </summary>
/// <param name="setFlags">Type of the created set. </param>
/// <param name="headerSize">Set header size; may not be less than sizeof(CvSet). </param>
/// <param name="elemSize">Set element size; may not be less than CvSetElem. </param>
/// <param name="storage">Container for the set. </param>
#endif
public CvSet(SeqType setFlags, int headerSize, int elemSize, CvMemStorage storage)
{
    if (storage == null)
        // Include the parameter name so the exception pinpoints the argument.
        throw new ArgumentNullException(nameof(storage));
    IntPtr p = NativeMethods.cvCreateSet(setFlags, headerSize, elemSize, storage.CvPtr);
    Initialize(p);
    // Keep a managed reference so the storage is not collected while the
    // native set still points into it.
    holdingStorage = storage;
}
示例11: CvGraph
/// <summary>
/// 空のグラフを生成する
/// </summary>
/// <param name="graphFlags">生成したグラフのタイプ.無向グラフの場合,CV_SEQ_KIND_GRAPH,有向グラフの場合,CV_SEQ_KIND_GRAPH | CV_GRAPH_FLAG_ORIENTED. </param>
/// <param name="headerSize">グラフのヘッダサイズ (sizeof(CvGraph)以上)</param>
/// <param name="vtxSize">グラフの頂点サイズ</param>
/// <param name="edgeSize">グラフの辺サイズ</param>
/// <param name="storage">グラフコンテナ</param>
#else
/// <summary>
/// Creates empty graph
/// </summary>
/// <param name="graphFlags">Type of the created graph. Usually, it is either CV_SEQ_KIND_GRAPH for generic unoriented graphs and CV_SEQ_KIND_GRAPH | CV_GRAPH_FLAG_ORIENTED for generic oriented graphs. </param>
/// <param name="headerSize">Graph header size; may not be less than sizeof(CvGraph).</param>
/// <param name="vtxSize">Graph vertex size; the custom vertex structure must start with CvGraphVtx (use CV_GRAPH_VERTEX_FIELDS()) </param>
/// <param name="edgeSize">Graph edge size; the custom edge structure must start with CvGraphEdge (use CV_GRAPH_EDGE_FIELDS()) </param>
/// <param name="storage">The graph container. </param>
/// <remarks>The function cvCreateGraph creates an empty graph and returns it.</remarks>
#endif
public CvGraph(SeqType graphFlags, int vtxSize, int edgeSize, CvMemStorage storage, int headerSize)
{
    if (storage == null)
        // Include the parameter name so the exception pinpoints the argument.
        throw new ArgumentNullException(nameof(storage));
    IntPtr p = NativeMethods.cvCreateGraph(graphFlags, headerSize, vtxSize, edgeSize, storage.CvPtr);
    Initialize(p);
    // Keep a managed reference so the storage is not collected while the
    // native graph still points into it.
    holdingStorage = storage;
}
示例12: Update
// Update is called once per frame.
// Grabs a camera frame, detects faces, circles them, and updates the
// facepos / isFaceInCapture state used elsewhere.
void Update()
{
    IplImage frame = Cv.QueryFrame(capture);
    // QueryFrame returns null when no frame is available (camera lost /
    // end of stream); bail out instead of crashing in CloneImage.
    if (frame == null)
        return;
    using (IplImage img = Cv.CloneImage(frame))
    using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / Scale), Cv.Round(img.Height / Scale)), BitDepth.U8, 1))
    {
        // Downscaled, histogram-equalized grayscale image speeds up detection.
        using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
        {
            Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
            Cv.Resize(gray, smallImg, Interpolation.Linear);
            Cv.EqualizeHist(smallImg, smallImg);
        }
        using (CvMemStorage storage = new CvMemStorage())
        {
            storage.Clear();
            CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(64, 64));
            // Circle every detected face on the full-size image.
            for (int i = 0; i < faces.Total; i++)
            {
                CvRect r = faces[i].Value.Rect;
                CvPoint center = new CvPoint
                {
                    X = Cv.Round((r.X + r.Width * 0.5) * Scale),
                    Y = Cv.Round((r.Y + r.Height * 0.5) * Scale)
                };
                int radius = Cv.Round((r.Width + r.Height) * 0.25 * Scale);
                img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
            }
            // Normalized position of the first face (zero when none found).
            if (faces.Total > 0)
            {
                CvRect r = faces[0].Value.Rect;
                facepos = new Vector2((r.X + r.Width / 2.0f) / CAPTURE_WIDTH, (r.Y + r.Height / 2.0f) / CAPTURE_HEIGHT);
            }
            else
            {
                facepos = Vector2.zero;
            }
            // BUGFIX: the original tested facepos.x twice; the second test
            // must be on facepos.y to check the vertical band as well.
            isFaceInCapture =
                facepos.x >= 0.2 && facepos.x <= 0.7 &&
                facepos.y >= 0.2 && facepos.y <= 0.7;
        }
        Cv.ShowImage("FaceDetect", img);
    }
}
示例13: CvSet
/// <summary>
/// 空のセットを生成する
/// </summary>
/// <param name="set_flags">生成するセットのタイプ. </param>
/// <param name="header_size">セットのヘッダのサイズ(sizeof(CvSet)以上). </param>
/// <param name="elem_size">セットの要素のサイズ(CvSetElem 以上). </param>
/// <param name="storage">セットのためのコンテナ. </param>
#else
/// <summary>
/// Creates empty set
/// </summary>
/// <param name="set_flags">Type of the created set. </param>
/// <param name="header_size">Set header size; may not be less than sizeof(CvSet). </param>
/// <param name="elem_size">Set element size; may not be less than CvSetElem. </param>
/// <param name="storage">Container for the set. </param>
#endif
public CvSet(SeqType set_flags, int header_size, int elem_size, CvMemStorage storage)
{
    if (storage == null)
    {
        // Include the parameter name so the exception pinpoints the argument.
        throw new ArgumentNullException(nameof(storage));
    }
    IntPtr ptr = CvInvoke.cvCreateSet(set_flags, header_size, elem_size, storage.CvPtr);
    Initialize(ptr);
    // Keep a managed reference so the storage is not collected while the
    // native set still points into it.
    this._storage = storage;
}
示例14: CvGraph
/// <summary>
/// 空のグラフを生成する
/// </summary>
/// <param name="graph_flags">生成したグラフのタイプ.無向グラフの場合,CV_SEQ_KIND_GRAPH,有向グラフの場合,CV_SEQ_KIND_GRAPH | CV_GRAPH_FLAG_ORIENTED. </param>
/// <param name="header_size">グラフのヘッダサイズ (sizeof(CvGraph)以上)</param>
/// <param name="vtx_size">グラフの頂点サイズ</param>
/// <param name="edge_size">グラフの辺サイズ</param>
/// <param name="storage">グラフコンテナ</param>
#else
/// <summary>
/// Creates empty graph
/// </summary>
/// <param name="graph_flags">Type of the created graph. Usually, it is either CV_SEQ_KIND_GRAPH for generic unoriented graphs and CV_SEQ_KIND_GRAPH | CV_GRAPH_FLAG_ORIENTED for generic oriented graphs. </param>
/// <param name="header_size">Graph header size; may not be less than sizeof(CvGraph).</param>
/// <param name="vtx_size">Graph vertex size; the custom vertex structure must start with CvGraphVtx (use CV_GRAPH_VERTEX_FIELDS()) </param>
/// <param name="edge_size">Graph edge size; the custom edge structure must start with CvGraphEdge (use CV_GRAPH_EDGE_FIELDS()) </param>
/// <param name="storage">The graph container. </param>
/// <remarks>The function cvCreateGraph creates an empty graph and returns it.</remarks>
#endif
public CvGraph(SeqType graph_flags, int vtx_size, int edge_size, CvMemStorage storage, int header_size)
{
    if (storage == null)
    {
        // Include the parameter name so the exception pinpoints the argument.
        throw new ArgumentNullException(nameof(storage));
    }
    IntPtr ptr = CvInvoke.cvCreateGraph(graph_flags, header_size, vtx_size, edge_size, storage.CvPtr);
    Initialize(ptr);
    // Keep a managed reference so the storage is not collected while the
    // native graph still points into it.
    this._storage = storage;
}
示例15: ConvexityDefect
public ConvexityDefect()
{
    // Find the flesh-colored region of a hand image, extract its contour,
    // compute the convex hull and convexity defects, and display the stages.
    using (IplImage src = new IplImage(Const.ImageHand, LoadMode.Color))
    using (IplImage hsv = new IplImage(src.Size, BitDepth.U8, 3))
    using (IplImage planeH = new IplImage(src.Size, BitDepth.U8, 1))
    using (IplImage planeS = new IplImage(src.Size, BitDepth.U8, 1))
    using (IplImage planeV = new IplImage(src.Size, BitDepth.U8, 1))
    using (IplImage backProjection = new IplImage(src.Size, BitDepth.U8, 1))
    using (IplImage flesh = new IplImage(src.Size, BitDepth.U8, 1))
    using (IplImage hullImg = new IplImage(src.Size, BitDepth.U8, 1))
    using (IplImage defectImg = new IplImage(src.Size, BitDepth.U8, 3))
    using (IplImage contourImg = new IplImage(src.Size, BitDepth.U8, 3))
    using (CvMemStorage storage = new CvMemStorage())
    {
        // RGB -> HSV
        Cv.CvtColor(src, hsv, ColorConversion.BgrToHsv);
        Cv.CvtPixToPlane(hsv, planeH, planeS, planeV, null);
        IplImage[] hsvPlanes = { planeH, planeS, planeV };
        // Estimate the flesh-colored region via back projection.
        RetrieveFleshRegion(src, hsvPlanes, backProjection);
        // Keep only the largest connected blob and smooth it.
        FilterByMaximalBlob(backProjection, flesh);
        Interpolate(flesh);
        // Extract the contour of the blob.
        CvSeq<CvPoint> contours = FindContours(flesh, storage);
        if (contours != null)
        {
            Cv.DrawContours(contourImg, contours, CvColor.Red, CvColor.Green, 0, 3, LineType.AntiAlias);
            // Convex hull of the contour.
            int[] hullIndices;
            Cv.ConvexHull2(contours, out hullIndices, ConvexHullOrientation.Clockwise);
            Cv.Copy(flesh, hullImg);
            DrawConvexHull(contours, hullIndices, hullImg);
            // Convexity defects (the concave gaps along the hull).
            Cv.Copy(contourImg, defectImg);
            CvSeq<CvConvexityDefect> defects = Cv.ConvexityDefects(contours, hullIndices);
            DrawDefects(defectImg, defects);
        }
        using (new CvWindow("src", src))
        using (new CvWindow("back projection", backProjection))
        using (new CvWindow("hull", hullImg))
        using (new CvWindow("defect", defectImg))
        {
            Cv.WaitKey();
        }
    }
}