This article collects typical usage examples of the CvPoint class in C#, intended for readers wondering what CvPoint is for and how to use it. CvPoint is part of the OpenCvSharp library; 15 code examples are shown below, sorted by popularity by default. Upvoting the examples you find useful helps the site recommend better C# samples.
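Before the longer listings, here is a minimal sketch (not taken from any of the examples below, using the OpenCvSharp API seen throughout this page) showing the two ways CvPoint is constructed here and a simple distance computation:
// Minimal illustrative sketch of CvPoint: an integer X/Y pixel coordinate pair.
CvPoint a = new CvPoint(10, 20);              // x/y constructor
CvPoint b = new CvPoint { X = 40, Y = 60 };   // object-initializer form used in several examples below
double dx = b.X - a.X;
double dy = b.Y - a.Y;
double distance = Math.Sqrt(dx * dx + dy * dy); // Euclidean distance: 50
Console.WriteLine("distance = {0}", distance);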
Example 1: SeqPartition
/// <summary>
/// Demonstrates partitioning a sequence of random points by a distance
/// threshold, controlled by the "threshold" trackbar.
/// </summary>
public SeqPartition()
{
CvMemStorage storage = new CvMemStorage(0);
pointSeq = new CvSeq<CvPoint>(SeqType.EltypeS32C2, CvSeq.SizeOf, storage);
Random rand = new Random();
canvas = new IplImage(Width, Height, BitDepth.U8, 3);
colors = new CvScalar[Count];
for (int i = 0; i < Count; i++)
{
CvPoint pt = new CvPoint
{
X = rand.Next(Width),
Y = rand.Next(Height)
};
pointSeq.Push(pt);
int icolor = rand.Next() | 0x00404040;
colors[i] = Cv.RGB(icolor & 255, (icolor >> 8) & 255, (icolor >> 16) & 255);
}
using (window = new CvWindowEx() { Text = "points" })
{
window.CreateTrackbar("threshold", 10, 50, OnTrack);
OnTrack(10);
CvWindowEx.WaitKey();
}
}
Example 2: InitializeComponent
public 描画画面()
{
InitializeComponent();
dis_height = System.Windows.Forms.Screen.PrimaryScreen.Bounds.Height;
dis_width = System.Windows.Forms.Screen.PrimaryScreen.Bounds.Width;
pos_max = Tobii.pos_max;
while (Tobii.眼球位置_L[0] == 0 || Tobii.眼球位置_R[0] == 100) { } // wait here until both eyes are tracked
diff_in = Tobii.眼球位置_R[0] - Tobii.眼球位置_L[0];
posY_in = (Tobii.眼球位置_L[1] + Tobii.眼球位置_R[1]) / 2;
pictureBoxIpl1.Width = dis_width;
pictureBoxIpl1.Height = dis_height;
frame = Cv.CreateImage(new CvSize(dis_width, dis_height), BitDepth.U8, 3);
background = Cv.CreateImage(new CvSize(dis_width, dis_height), BitDepth.U8, 3);
background = メイン画面.background;
pictureBoxIpl1.ImageIpl = background;
window_size = new CvSize(メイン画面.window[0], メイン画面.window[1]);
point_old = new CvPoint(window_size.Width / 2, window_size.Height / 2);
許容半径 = メイン画面.radius;
PC = new System.Diagnostics.PerformanceCounter[3];
タイマー開始();
}
Example 3: FaceDetect
public FaceDetect()
{
CheckMemoryLeak();
// CvHaarClassifierCascade, cvHaarDetectObjects
CvColor[] colors = new CvColor[]{
new CvColor(0,0,255),
new CvColor(0,128,255),
new CvColor(0,255,255),
new CvColor(0,255,0),
new CvColor(255,128,0),
new CvColor(255,255,0),
new CvColor(255,0,0),
new CvColor(255,0,255),
};
const double Scale = 1.14;
const double ScaleFactor = 1.0850;
const int MinNeighbors = 2;
using (IplImage img = new IplImage(FilePath.Image.Yalta, LoadMode.Color))
using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / Scale), Cv.Round(img.Height / Scale)), BitDepth.U8, 1))
{
using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
{
Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
Cv.Resize(gray, smallImg, Interpolation.Linear);
Cv.EqualizeHist(smallImg, smallImg);
}
using (var cascade = CvHaarClassifierCascade.FromFile(FilePath.Text.HaarCascade))
using (var storage = new CvMemStorage())
{
storage.Clear();
// detect faces
Stopwatch watch = Stopwatch.StartNew();
CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));
watch.Stop();
Console.WriteLine("detection time = {0}ms\n", watch.ElapsedMilliseconds);
// draw a circle around each detected face
for (int i = 0; i < faces.Total; i++)
{
CvRect r = faces[i].Value.Rect;
CvPoint center = new CvPoint
{
X = Cv.Round((r.X + r.Width * 0.5) * Scale),
Y = Cv.Round((r.Y + r.Height * 0.5) * Scale)
};
int radius = Cv.Round((r.Width + r.Height) * 0.25 * Scale);
img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
}
}
// show in a window
CvWindow.ShowImages(img);
}
}
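The center/radius arithmetic above converts a rectangle detected in the downscaled image back into full-resolution coordinates. Factored out as a hypothetical helper (the name RectToCircle is illustrative and not part of the sample), the mapping reads:
// Hypothetical helper, not part of the original sample:
// maps a detection rectangle from the 1/Scale-sized image to a circle in the full-resolution image.
static void RectToCircle(CvRect r, double scale, out CvPoint center, out int radius)
{
    center = new CvPoint
    {
        X = Cv.Round((r.X + r.Width * 0.5) * scale),
        Y = Cv.Round((r.Y + r.Height * 0.5) * scale)
    };
    radius = Cv.Round((r.Width + r.Height) * 0.25 * scale); // average of half-width and half-height
}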
Example 4: Snake
public Snake()
{
using (IplImage src = new IplImage(Const.ImageCake, LoadMode.GrayScale))
using (IplImage dst = new IplImage(src.Size, BitDepth.U8, 3))
{
CvPoint[] contour = new CvPoint[100];
CvPoint center = new CvPoint(src.Width / 2, src.Height / 2);
for (int i = 0; i < contour.Length; i++)
{
contour[i].X = (int)(center.X * Math.Cos(2 * Math.PI * i / contour.Length) + center.X);
contour[i].Y = (int)(center.Y * Math.Sin(2 * Math.PI * i / contour.Length) + center.Y);
}
Console.WriteLine("Press any key to snake\nEsc - quit");
using (CvWindow w = new CvWindow())
{
while (true)
{
src.SnakeImage(contour, 0.45f, 0.35f, 0.2f, new CvSize(15, 15), new CvTermCriteria(1), true);
src.CvtColor(dst, ColorConversion.GrayToRgb);
for (int i = 0; i < contour.Length - 1; i++)
{
dst.Line(contour[i], contour[i + 1], new CvColor(255, 0, 0), 2);
}
dst.Line(contour[contour.Length - 1], contour[0], new CvColor(255, 0, 0), 2);
w.Image = dst;
int key = CvWindow.WaitKey();
if (key == 27)
{
break;
}
}
}
}
}
Example 5: Contour
public Contour()
{
// cvContourArea, cvArcLength
// compute the area of the region enclosed by a contour and the contour's length
const int SIZE = 500;
// (1) allocate and initialize the image
using (CvMemStorage storage = new CvMemStorage())
using (IplImage img = new IplImage(SIZE, SIZE, BitDepth.U8, 3))
{
img.Zero();
// (2) generate the point sequence
CvSeq<CvPoint> points = new CvSeq<CvPoint>(SeqType.PolyLine, storage);
CvRNG rng = new CvRNG((ulong)DateTime.Now.Ticks);
double scale = rng.RandReal() + 0.5;
CvPoint pt0 = new CvPoint
{
X = (int)(Math.Cos(0) * SIZE / 4 * scale + SIZE / 2),
Y = (int)(Math.Sin(0) * SIZE / 4 * scale + SIZE / 2)
};
img.Circle(pt0, 2, CvColor.Green);
points.Push(pt0);
for (int i = 1; i < 20; i++)
{
scale = rng.RandReal() + 0.5;
CvPoint pt1 = new CvPoint
{
X = (int)(Math.Cos(i * 2 * Math.PI / 20) * SIZE / 4 * scale + SIZE / 2),
Y = (int)(Math.Sin(i * 2 * Math.PI / 20) * SIZE / 4 * scale + SIZE / 2)
};
img.Line(pt0, pt1, CvColor.Green, 2);
pt0.X = pt1.X;
pt0.Y = pt1.Y;
img.Circle(pt0, 3, CvColor.Green, Cv.FILLED);
points.Push(pt0);
}
img.Line(pt0, points.GetSeqElem(0).Value, CvColor.Green, 2);
// (3) compute the bounding rectangle, area, and arc length
CvRect rect = points.BoundingRect(false);
double area = points.ContourArea();
double length = points.ArcLength(CvSlice.WholeSeq, 1);
// (4) draw the results on the image
img.Rectangle(new CvPoint(rect.X, rect.Y), new CvPoint(rect.X + rect.Width, rect.Y + rect.Height), CvColor.Red, 2);
string text_area = string.Format("Area: rect={0}, contour={1}", rect.Width * rect.Height, area);
string text_length = string.Format("Length: rect={0}, contour={1}", 2 * (rect.Width + rect.Height), length);
using (CvFont font = new CvFont(FontFace.HersheySimplex, 0.7, 0.7, 0, 1, LineType.AntiAlias))
{
img.PutText(text_area, new CvPoint(10, img.Height - 30), font, CvColor.White);
img.PutText(text_length, new CvPoint(10, img.Height - 10), font, CvColor.White);
}
// (5) show the image and exit when a key is pressed
using (CvWindow window = new CvWindow("BoundingRect", WindowMode.AutoSize))
{
window.Image = img;
CvWindow.WaitKey(0);
}
}
}
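The printed comparison (bounding-rectangle area and perimeter versus contour area and arc length) can be cross-checked on a plain CvPoint[] polygon with the shoelace formula; the helper below is only an illustrative sketch, not part of the sample:
// Illustrative sketch: polygon area via the shoelace formula.
// For a simple (non-self-intersecting) closed polygon this should agree with ContourArea().
static double ShoelaceArea(CvPoint[] poly)
{
    double sum = 0;
    for (int i = 0; i < poly.Length; i++)
    {
        CvPoint p = poly[i];
        CvPoint q = poly[(i + 1) % poly.Length]; // wrap around to close the polygon
        sum += (double)p.X * q.Y - (double)q.X * p.Y;
    }
    return Math.Abs(sum) / 2.0;
}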
Example 6: Angle
/// <summary>
/// helper function:
/// finds the cosine of the angle between the vectors
/// pt0->pt1 and pt0->pt2
/// </summary>
/// <param name="pt1"></param>
/// <param name="pt2"></param>
/// <param name="pt0"></param>
/// <returns></returns>
static double Angle(CvPoint pt1, CvPoint pt2, CvPoint pt0)
{
double dx1 = pt1.X - pt0.X;
double dy1 = pt1.Y - pt0.Y;
double dx2 = pt2.X - pt0.X;
double dy2 = pt2.Y - pt0.Y;
return (dx1 * dx2 + dy1 * dy2) / Math.Sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10);
}
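A quick, illustrative sanity check of the helper (not part of the original sample): perpendicular vectors give a cosine near 0, parallel vectors a cosine near 1.
CvPoint origin = new CvPoint(0, 0);
double perpendicular = Angle(new CvPoint(10, 0), new CvPoint(0, 10), origin); // ~0.0 (90 degrees)
double parallel      = Angle(new CvPoint(10, 0), new CvPoint(20, 0), origin); // ~1.0 (0 degrees)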
Example 7: Watershed
public Watershed()
{
// cvWatershed
// Click with the mouse to place the centers of circular markers (seed regions); several markers can be set.
// The markers are grown along the image gradient, and the image is segmented along the boundaries that form where the gradient is high.
// The image is split into as many regions as there are markers.
// (2) load the image, initialize the marker image, and allocate images for displaying the result
using (IplImage srcImg = new IplImage(Const.ImageGoryokaku, LoadMode.AnyDepth | LoadMode.AnyColor))
using (IplImage dstImg = srcImg.Clone())
using (IplImage dspImg = srcImg.Clone())
using (IplImage markers = new IplImage(srcImg.Size, BitDepth.S32, 1))
{
markers.Zero();
// (3) show the input image and register a mouse callback for placing seed components
using (CvWindow wImage = new CvWindow("image", WindowMode.AutoSize))
{
wImage.Image = srcImg;
// a click sets the center of a circular seed region
int seedNum = 0;
wImage.OnMouseCallback += delegate(MouseEvent ev, int x, int y, MouseEvent flags)
{
if (ev == MouseEvent.LButtonDown)
{
seedNum++;
CvPoint pt = new CvPoint(x, y);
markers.Circle(pt, 20, CvScalar.ScalarAll(seedNum), Cv.FILLED, LineType.Link8, 0);
dspImg.Circle(pt, 20, CvColor.White, 3, LineType.Link8, 0);
wImage.Image = dspImg;
}
};
CvWindow.WaitKey();
}
// (4) run the watershed segmentation
Cv.Watershed(srcImg, markers);
// (5) mark the watershed boundaries (pixel value == -1) on the display image
for (int i = 0; i < markers.Height; i++)
{
for (int j = 0; j < markers.Width; j++)
{
int idx = (int)(markers.Get2D(i, j).Val0);
if (idx == -1)
{
dstImg.Set2D(i, j, CvColor.Red);
}
}
}
using (CvWindow wDst = new CvWindow("watershed transform", WindowMode.AutoSize))
{
wDst.Image = dstImg;
CvWindow.WaitKey();
}
}
}
Example 8: GetAngle
// NOTE: needs debugging - the function appears to work incorrectly!
// Finds the angle of the line defined by the two points P1 and P2
public static double GetAngle(CvPoint P1, CvPoint P2)
{
// treat the higher point (smaller Y) as the central one,
CvPoint aCurr, aPrev, aNext;
aCurr = (P1.Y < P2.Y) ? P1 : P2;
aPrev = (P1.Y < P2.Y) ? P2 : P1;
aNext.X = aCurr.X;
aNext.Y = 1000;
if (aCurr.X > aPrev.X) return -GetAngle(aCurr, aPrev, aNext);
else return GetAngle(aCurr, aPrev, aNext);
}
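Since the author's own note flags GetAngle as possibly incorrect, here is a minimal alternative sketch (the name GetLineAngleDegrees is hypothetical, not from the source) that computes the line's direction directly with Math.Atan2:
// Hypothetical alternative, not part of the original sample:
// angle of the line P1->P2 measured from the positive X axis, in degrees (-180..180].
public static double GetLineAngleDegrees(CvPoint p1, CvPoint p2)
{
    return Math.Atan2(p2.Y - p1.Y, p2.X - p1.X) * 180.0 / Math.PI;
}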
Example 9: CvSize
private void OnClick_csv出力(object sender, EventArgs e)
{
if (合成画像 != null)
{
string 結果 = "";
int x, y;
int roi_w = 9;
int roi_h = 9;
CvSize roiSize = new CvSize(roi_w, roi_h);
CvPoint roiPoint;
for (x = 0; x < 合成画像.Width - roi_w; x++)
{
System.Diagnostics.Debug.WriteLine(x + "\n" + 結果);
for (y = 0; y < 合成画像.Height - roi_h; y++)
{
string buff = "";
string type = 検査領域か判断(x, y, roi_w, roi_h);
if (type != "") // minor speed-up
{
roiPoint = new CvPoint(x, y);
Cv.SetImageROI(検査対象, new CvRect(roiPoint, roiSize));
if (type == "1") buff = csvフォーマットを取得(検査対象, roiSize, "1");
else if (type == "0") buff = csvフォーマットを取得(検査対象, roiSize, "0");
}
//if (checkBox_all.Checked)
//{
// roiPoint = new CvPoint(x, y);
// Cv.SetImageROI(検査対象, new CvRect(roiPoint, roiSize));
// Cv.SetImageROI(マスク画像, new CvRect(roiPoint, roiSize));
// if (マスク画像.Avg().Val0 == 0) buff = csvフォーマットを取得(検査対象, roiSize, "0");
// else if (マスク画像.Avg().Val0 == 255) buff = csvフォーマットを取得(検査対象, roiSize, "1");
//}
//else if (checkBox_black.Checked)
//{
// if (マスク画像.Avg().Val0 == 0) buff = csvフォーマットを取得(検査対象, roiSize, "0");
//}
//else
//{
// if (マスク画像.Avg().Val0 == 255) buff = csvフォーマットを取得(検査対象, roiSize, "1");
//}
if (buff != "") 結果 += buff + "\n";
Cv.ResetImageROI(マスク画像);
Cv.ResetImageROI(検査対象);
}
}
stringをcsv出力(結果, DateTime.Now.ToString("yy-MM-dd_") + this.Text);
}
}
Example 10: Approximate
public static CvCircleSegment Approximate(CvPoint[] points)
{
CvPoint2D32f[] points2D32f = new CvPoint2D32f[points.Length];
for (int i = 0; i < points.Length; i++)
{
points2D32f[i].X = (float)points[i].X;
points2D32f[i].Y = (float)points[i].Y;
}
return Approximate(points2D32f);
}
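The element-wise copy above can equivalently be written with LINQ; this is only a sketch (it assumes `using System.Linq;` and a CvPoint2D32f constructor taking x and y), not the library's own conversion:
// Equivalent LINQ-based conversion (illustrative sketch):
CvPoint2D32f[] points2D32f = points
    .Select(p => new CvPoint2D32f((float)p.X, (float)p.Y))
    .ToArray();
return Approximate(points2D32f);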
Example 11: Watershed
public Watershed()
{
using (var srcImg = new IplImage(FilePath.Image.Goryokaku, LoadMode.AnyDepth | LoadMode.AnyColor))
using (var dstImg = srcImg.Clone())
using (var dspImg = srcImg.Clone())
using (var markers = new IplImage(srcImg.Size, BitDepth.S32, 1))
{
markers.Zero();
using (var window = new CvWindow("image", WindowMode.AutoSize))
{
window.Image = srcImg;
// Mouse event
int seedNum = 0;
window.OnMouseCallback += delegate(MouseEvent ev, int x, int y, MouseEvent flags)
{
if (ev == MouseEvent.LButtonDown)
{
seedNum++;
CvPoint pt = new CvPoint(x, y);
markers.Circle(pt, 20, CvScalar.ScalarAll(seedNum), Cv.FILLED, LineType.Link8, 0);
dspImg.Circle(pt, 20, CvColor.White, 3, LineType.Link8, 0);
window.Image = dspImg;
}
};
CvWindow.WaitKey();
}
Cv.Watershed(srcImg, markers);
// draws watershed
for (int i = 0; i < markers.Height; i++)
{
for (int j = 0; j < markers.Width; j++)
{
int idx = (int)(markers.Get2D(i, j).Val0);
if (idx == -1)
{
dstImg.Set2D(i, j, CvColor.Red);
}
}
}
using (CvWindow wDst = new CvWindow("watershed transform", WindowMode.AutoSize))
{
wDst.Image = dstImg;
CvWindow.WaitKey();
}
}
}
Example 12: ConvexHull
public ConvexHull()
{
using (IplImage img = Cv.CreateImage(new CvSize(500, 500), BitDepth.U8, 3))
using (CvWindow window = new CvWindow("hull"))
{
Random rand = new Random();
for (; ; )
{
int count = rand.Next() % 100 + 1;
// create sequence of random points
CvPoint[] ptseq = new CvPoint[count];
for (int i = 0; i < ptseq.Length; i++)
{
ptseq[i] = new CvPoint
{
X = rand.Next() % (img.Width / 2) + img.Width / 4,
Y = rand.Next() % (img.Height / 2) + img.Height / 4
};
}
// draw points
Cv.Zero(img);
foreach (CvPoint pt in ptseq)
{
Cv.Circle(img, pt, 2, new CvColor(255, 0, 0), -1);
}
// find hull
CvPoint[] hull;
Cv.ConvexHull2(ptseq, out hull, ConvexHullOrientation.Clockwise);
// draw hull
CvPoint pt0 = hull.Last();
foreach (CvPoint pt in hull)
{
Cv.Line(img, pt0, pt, CvColor.Green);
pt0 = pt;
}
window.ShowImage(img);
if (Cv.WaitKey(0) == 27) // 'ESC'
break;
}
}
}
Example 13: SumLinePixelsManaged
/// <summary>
/// Calculate sum of line pixels (wrapper style)
/// </summary>
/// <param name="image"></param>
/// <param name="pt1"></param>
/// <param name="pt2"></param>
/// <returns></returns>
private CvScalar SumLinePixelsManaged(IplImage image, CvPoint pt1, CvPoint pt2)
{
double blue_sum = 0, green_sum = 0, red_sum = 0;
CvLineIterator iterator = new CvLineIterator(image, pt1, pt2, PixelConnectivity.Connectivity_8, false);
foreach (CvScalar pixel in iterator)
{
blue_sum += pixel.Val0; //blue_sum += iterator.ptr[0];
green_sum += pixel.Val1; //green_sum += iterator.ptr[1];
red_sum += pixel.Val2; //red_sum += iterator.ptr[2];
PrintCoordinate(image, iterator);
}
return new CvScalar(blue_sum, green_sum, red_sum);
}
Example 14: BoundingRect
public BoundingRect()
{
// cvBoundingRect
// compute the rectangle that bounds a set of points
// (1) allocate and initialize the image and memory storage
// (the memory storage is unnecessary if CvSeq is not used)
using (IplImage img = new IplImage(640, 480, BitDepth.U8, 3))
using (CvMemStorage storage = new CvMemStorage(0))
{
img.Zero();
CvRNG rng = new CvRNG(DateTime.Now);
// (2) generate the points
///*
// the easy way (use a plain array)
CvPoint[] points = new CvPoint[50];
for (int i = 0; i < 50; i++)
{
points[i] = new CvPoint()
{
X = (int)(rng.RandInt() % (img.Width / 2) + img.Width / 4),
Y = (int)(rng.RandInt() % (img.Height / 2) + img.Height / 4)
};
img.Circle(points[i], 3, new CvColor(0, 255, 0), Cv.FILLED);
}
//*/
/*
// the way the original sample does it (use CvSeq)
CvSeq points = new CvSeq(SeqType.EltypePoint, CvSeq.SizeOf, CvPoint.SizeOf, storage);
for (int i = 0; i < 50; i++) {
CvPoint pt = new CvPoint();
pt.X = (int)(rng.RandInt() % (img.Width / 2) + img.Width / 4);
pt.Y = (int)(rng.RandInt() % (img.Height / 2) + img.Height / 4);
points.Push(pt);
img.Circle(pt, 3, new CvColor(0, 255, 0), Cv.FILLED);
}
//*/
// (3) compute and draw the rectangle that bounds the points
CvRect rect = Cv.BoundingRect(points);
img.Rectangle(new CvPoint(rect.X, rect.Y), new CvPoint(rect.X + rect.Width, rect.Y + rect.Height), new CvColor(255, 0, 0), 2);
// (4) show the image and exit when a key is pressed
using (CvWindow w = new CvWindow("BoundingRect", WindowMode.AutoSize, img))
{
CvWindow.WaitKey(0);
}
}
}
Example 15: SumLinePixelsNative
/// <summary>
/// Calculate sum of line pixels (native style)
/// </summary>
/// <param name="image"></param>
/// <param name="pt1"></param>
/// <param name="pt2"></param>
/// <returns></returns>
private CvScalar SumLinePixelsNative(IplImage image, CvPoint pt1, CvPoint pt2)
{
CvLineIterator iterator;
int blue_sum = 0, green_sum = 0, red_sum = 0;
int count = Cv.InitLineIterator(image, pt1, pt2, out iterator, PixelConnectivity.Connectivity_8, false);
for (int i = 0; i < count; i++)
{
blue_sum += Marshal.ReadByte(iterator.Ptr, 0); //blue_sum += iterator.ptr[0];
green_sum += Marshal.ReadByte(iterator.Ptr, 1); //green_sum += iterator.ptr[1];
red_sum += Marshal.ReadByte(iterator.Ptr, 2); //red_sum += iterator.ptr[2];
Cv.NEXT_LINE_POINT(iterator);
PrintCoordinate(image, iterator);
}
return new CvScalar(blue_sum, green_sum, red_sum);
}
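Both helpers walk the same 8-connected line, so for the same image and endpoints they should return identical per-channel sums. A hypothetical comparison sketch (the file name and endpoints are illustrative):
// Hypothetical usage sketch, not part of the original sample:
using (IplImage image = new IplImage("fruits.jpg", LoadMode.Color))
{
    CvPoint pt1 = new CvPoint(20, 40);
    CvPoint pt2 = new CvPoint(300, 250);
    CvScalar managedSum = SumLinePixelsManaged(image, pt1, pt2);
    CvScalar nativeSum = SumLinePixelsNative(image, pt1, pt2);
    // The two results should match channel-by-channel (B, G, R).
    Console.WriteLine("managed = {0}", managedSum);
    Console.WriteLine("native  = {0}", nativeSum);
}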