This article collects and summarizes typical usage examples of the OpenCvSharp.IplImage.Circle method in C#. If you are wondering what exactly IplImage.Circle does, how to call it, or what real-world uses look like, the hand-picked code examples below should help. You can also explore further usage examples of the containing class, OpenCvSharp.IplImage.
The following lists 14 code examples of the IplImage.Circle method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code examples.
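Before the full samples, here is a minimal orientation sketch of the two Circle call shapes that recur throughout them: an outlined circle (center, radius, color, optional thickness) and a filled circle (Cv.FILLED passed as the thickness). It is a sketch only, assuming the classic OpenCvSharp 2.x C-style API that the examples below use; the CircleSketch class is purely illustrative and not part of any sample.
// Minimal sketch (assumed OpenCvSharp 2.x-style API; uses only calls that also appear in the samples below)
using OpenCvSharp;

class CircleSketch
{
    static void Main()
    {
        using (IplImage img = new IplImage(320, 240, BitDepth.U8, 3))
        {
            img.Zero();                                                    // black background
            img.Circle(new CvPoint(160, 120), 60, CvColor.Green, 2);       // outlined circle, thickness 2
            img.Circle(new CvPoint(160, 120), 10, CvColor.Red, Cv.FILLED); // filled disc
            CvWindow.ShowImages(img);                                      // display and wait for a key
        }
    }
}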
Example 1: Contour
public Contour()
{
// cvContourArea, cvArcLength
// Compute the area of the region enclosed by a contour and the contour's arc length
const int SIZE = 500;
// (1) Allocate and initialize the image
using (CvMemStorage storage = new CvMemStorage())
using (IplImage img = new IplImage(SIZE, SIZE, BitDepth.U8, 3))
{
img.Zero();
// (2) Generate the point sequence
CvSeq<CvPoint> points = new CvSeq<CvPoint>(SeqType.PolyLine, storage);
CvRNG rng = new CvRNG((ulong)DateTime.Now.Ticks);
double scale = rng.RandReal() + 0.5;
CvPoint pt0 = new CvPoint
{
X = (int)(Math.Cos(0) * SIZE / 4 * scale + SIZE / 2),
Y = (int)(Math.Sin(0) * SIZE / 4 * scale + SIZE / 2)
};
img.Circle(pt0, 2, CvColor.Green);
points.Push(pt0);
for (int i = 1; i < 20; i++)
{
scale = rng.RandReal() + 0.5;
CvPoint pt1 = new CvPoint
{
X = (int)(Math.Cos(i * 2 * Math.PI / 20) * SIZE / 4 * scale + SIZE / 2),
Y = (int)(Math.Sin(i * 2 * Math.PI / 20) * SIZE / 4 * scale + SIZE / 2)
};
img.Line(pt0, pt1, CvColor.Green, 2);
pt0.X = pt1.X;
pt0.Y = pt1.Y;
img.Circle(pt0, 3, CvColor.Green, Cv.FILLED);
points.Push(pt0);
}
img.Line(pt0, points.GetSeqElem(0).Value, CvColor.Green, 2);
// (3) Compute the bounding rectangle, area, and arc length
CvRect rect = points.BoundingRect(false);
double area = points.ContourArea();
double length = points.ArcLength(CvSlice.WholeSeq, 1);
// (4) Draw the results onto the image
img.Rectangle(new CvPoint(rect.X, rect.Y), new CvPoint(rect.X + rect.Width, rect.Y + rect.Height), CvColor.Red, 2);
string text_area = string.Format("Area: wrect={0}, contour={1}", rect.Width * rect.Height, area);
string text_length = string.Format("Length: rect={0}, contour={1}", 2 * (rect.Width + rect.Height), length);
using (CvFont font = new CvFont(FontFace.HersheySimplex, 0.7, 0.7, 0, 1, LineType.AntiAlias))
{
img.PutText(text_area, new CvPoint(10, img.Height - 30), font, CvColor.White);
img.PutText(text_length, new CvPoint(10, img.Height - 10), font, CvColor.White);
}
// (5) Show the image and exit when a key is pressed
using (CvWindow window = new CvWindow("BoundingRect", WindowMode.AutoSize))
{
window.Image = img;
CvWindow.WaitKey(0);
}
}
}
Example 2: FitLine
public FitLine()
{
CvSize imageSize = new CvSize(500, 500);
// cvFitLine
CvPoint2D32f[] points = GetRandomPoints(20, imageSize);
CvLine2D line = Cv.FitLine2D(points, DistanceType.L2, 0, 0.01, 0.01);
using (IplImage img = new IplImage(imageSize, BitDepth.U8, 3))
{
img.Zero();
// draw line
{
CvPoint pt1, pt2;
line.FitSize(img.Width, img.Height, out pt1, out pt2);
img.Line(pt1, pt2, CvColor.Green, 1, LineType.Link8);
}
// draw points and distances
using (CvFont font = new CvFont(FontFace.HersheySimplex, 0.33, 0.33))
{
foreach (CvPoint2D32f p in points)
{
double d = line.Distance(p);
img.Circle(p, 2, CvColor.White, -1, LineType.AntiAlias);
img.PutText(string.Format("{0:F1}", d), new CvPoint((int) (p.X + 3), (int) (p.Y + 3)), font, CvColor.Green);
}
}
CvWindow.ShowImages(img);
}
}
Example 3: FaceDetect
public FaceDetect()
{
CheckMemoryLeak();
// CvHaarClassifierCascade, cvHaarDetectObjects
CvColor[] colors = new CvColor[]{
new CvColor(0,0,255),
new CvColor(0,128,255),
new CvColor(0,255,255),
new CvColor(0,255,0),
new CvColor(255,128,0),
new CvColor(255,255,0),
new CvColor(255,0,0),
new CvColor(255,0,255),
};
const double Scale = 1.14;
const double ScaleFactor = 1.0850;
const int MinNeighbors = 2;
using (IplImage img = new IplImage(FilePath.Image.Yalta, LoadMode.Color))
using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / Scale), Cv.Round(img.Height / Scale)), BitDepth.U8, 1))
{
using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
{
Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
Cv.Resize(gray, smallImg, Interpolation.Linear);
Cv.EqualizeHist(smallImg, smallImg);
}
using (var cascade = CvHaarClassifierCascade.FromFile(FilePath.Text.HaarCascade))
using (var storage = new CvMemStorage())
{
storage.Clear();
// Detect faces
Stopwatch watch = Stopwatch.StartNew();
CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));
watch.Stop();
Console.WriteLine("detection time = {0}ms\n", watch.ElapsedMilliseconds);
// Draw a circle around each detected face
for (int i = 0; i < faces.Total; i++)
{
CvRect r = faces[i].Value.Rect;
CvPoint center = new CvPoint
{
X = Cv.Round((r.X + r.Width * 0.5) * Scale),
Y = Cv.Round((r.Y + r.Height * 0.5) * Scale)
};
int radius = Cv.Round((r.Width + r.Height) * 0.25 * Scale);
img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
}
}
// Show in a window
CvWindow.ShowImages(img);
}
}
Example 4: Watershed
public Watershed()
{
// cvWatershed
// Click with the mouse to specify the centers of circular markers (seed regions); several markers can be placed.
// Each marker is then grown along the image gradient, and the image is split into regions along the boundaries that form where the gradient is high.
// The number of regions equals the number of markers that were placed.
// (2) Load the image, initialize the marker image, and allocate the image used to display the result
using (IplImage srcImg = new IplImage(Const.ImageGoryokaku, LoadMode.AnyDepth | LoadMode.AnyColor))
using (IplImage dstImg = srcImg.Clone())
using (IplImage dspImg = srcImg.Clone())
using (IplImage markers = new IplImage(srcImg.Size, BitDepth.S32, 1))
{
markers.Zero();
// (3) Show the input image and register a mouse callback for specifying the seed components
using (CvWindow wImage = new CvWindow("image", WindowMode.AutoSize))
{
wImage.Image = srcImg;
// A click specifies the center and places a circular seed region
int seedNum = 0;
wImage.OnMouseCallback += delegate(MouseEvent ev, int x, int y, MouseEvent flags)
{
if (ev == MouseEvent.LButtonDown)
{
seedNum++;
CvPoint pt = new CvPoint(x, y);
markers.Circle(pt, 20, CvScalar.ScalarAll(seedNum), Cv.FILLED, LineType.Link8, 0);
dspImg.Circle(pt, 20, CvColor.White, 3, LineType.Link8, 0);
wImage.Image = dspImg;
}
};
CvWindow.WaitKey();
}
// (4) Run the watershed segmentation
Cv.Watershed(srcImg, markers);
// (5) Draw the watershed boundaries in the result (pixel value = -1) onto the display image
for (int i = 0; i < markers.Height; i++)
{
for (int j = 0; j < markers.Width; j++)
{
int idx = (int)(markers.Get2D(i, j).Val0);
if (idx == -1)
{
dstImg.Set2D(i, j, CvColor.Red);
}
}
}
using (CvWindow wDst = new CvWindow("watershed transform", WindowMode.AutoSize))
{
wDst.Image = dstImg;
CvWindow.WaitKey();
}
}
}
Example 5: CppStyleMSER
/// <summary>
/// Extracts MSER by C++-style code (cv::MSER)
/// </summary>
/// <param name="imgGray"></param>
/// <param name="imgRender"></param>
private void CppStyleMSER(IplImage imgGray, IplImage imgDst)
{
MSER mser = new MSER();
CvPoint[][] contours = mser.Extract(new Mat(imgGray, false), null); // operator()
foreach (CvPoint[] p in contours)
{
CvColor color = CvColor.Random();
for (int i = 0; i < p.Length; i++)
{
imgDst.Circle(p[i], 1, color);
}
}
}
Example 6: Watershed
public Watershed()
{
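// Same watershed demo as Example 4: click in the "image" window to place circular
// seed markers, run cvWatershed, then paint the boundary pixels (marker value -1) red.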
using (var srcImg = new IplImage(FilePath.Image.Goryokaku, LoadMode.AnyDepth | LoadMode.AnyColor))
using (var dstImg = srcImg.Clone())
using (var dspImg = srcImg.Clone())
using (var markers = new IplImage(srcImg.Size, BitDepth.S32, 1))
{
markers.Zero();
using (var window = new CvWindow("image", WindowMode.AutoSize))
{
window.Image = srcImg;
// Mouse event
int seedNum = 0;
window.OnMouseCallback += delegate(MouseEvent ev, int x, int y, MouseEvent flags)
{
if (ev == MouseEvent.LButtonDown)
{
seedNum++;
CvPoint pt = new CvPoint(x, y);
markers.Circle(pt, 20, CvScalar.ScalarAll(seedNum), Cv.FILLED, LineType.Link8, 0);
dspImg.Circle(pt, 20, CvColor.White, 3, LineType.Link8, 0);
window.Image = dspImg;
}
};
CvWindow.WaitKey();
}
Cv.Watershed(srcImg, markers);
// draws watershed
for (int i = 0; i < markers.Height; i++)
{
for (int j = 0; j < markers.Width; j++)
{
int idx = (int)(markers.Get2D(i, j).Val0);
if (idx == -1)
{
dstImg.Set2D(i, j, CvColor.Red);
}
}
}
using (CvWindow wDst = new CvWindow("watershed transform", WindowMode.AutoSize))
{
wDst.Image = dstImg;
CvWindow.WaitKey();
}
}
}
Example 7: BoundingRect
public BoundingRect()
{
// cvBoundingRect
// Compute the rectangle that encloses a set of points
// (1) Allocate and initialize the image and the memory storage
// (the memory storage is not needed unless CvSeq is used)
using (IplImage img = new IplImage(640, 480, BitDepth.U8, 3))
using (CvMemStorage storage = new CvMemStorage(0))
{
img.Zero();
CvRNG rng = new CvRNG(DateTime.Now);
// (2) Generate the point set
///*
// The simple way (use a plain array)
CvPoint[] points = new CvPoint[50];
for (int i = 0; i < 50; i++)
{
points[i] = new CvPoint()
{
X = (int)(rng.RandInt() % (img.Width / 2) + img.Width / 4),
Y = (int)(rng.RandInt() % (img.Height / 2) + img.Height / 4)
};
img.Circle(points[i], 3, new CvColor(0, 255, 0), Cv.FILLED);
}
//*/
/*
// The way that follows the original sample (use CvSeq)
CvSeq points = new CvSeq(SeqType.EltypePoint, CvSeq.SizeOf, CvPoint.SizeOf, storage);
for (int i = 0; i < 50; i++) {
CvPoint pt = new CvPoint();
pt.X = (int)(rng.RandInt() % (img.Width / 2) + img.Width / 4);
pt.Y = (int)(rng.RandInt() % (img.Height / 2) + img.Height / 4);
points.Push(pt);
img.Circle(pt, 3, new CvColor(0, 255, 0), Cv.FILLED);
}
//*/
// (3) Compute and draw the bounding rectangle of the points
CvRect rect = Cv.BoundingRect(points);
img.Rectangle(new CvPoint(rect.X, rect.Y), new CvPoint(rect.X + rect.Width, rect.Y + rect.Height), new CvColor(255, 0, 0), 2);
// (4) Show the image and exit when a key is pressed
using (CvWindow w = new CvWindow("BoundingRect", WindowMode.AutoSize, img))
{
CvWindow.WaitKey(0);
}
}
}
Example 8: Blob
public Blob()
{
using (var imgSrc = new IplImage(FilePath.Image.Shapes, LoadMode.Color))
using (var imgBinary = new IplImage(imgSrc.Size, BitDepth.U8, 1))
using (var imgRender = new IplImage(imgSrc.Size, BitDepth.U8, 3))
using (var imgContour = new IplImage(imgSrc.Size, BitDepth.U8, 3))
using (var imgPolygon = new IplImage(imgSrc.Size, BitDepth.U8, 3))
{
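// Binarize the source image, label connected components with cvblob, then render
// each blob's contour chain code and mark its polygon vertices with small red circles.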
Cv.CvtColor(imgSrc, imgBinary, ColorConversion.BgrToGray);
Cv.Threshold(imgBinary, imgBinary, 100, 255, ThresholdType.Binary);
CvBlobs blobs = new CvBlobs();
blobs.Label(imgBinary);
foreach (KeyValuePair<int, CvBlob> item in blobs)
{
CvBlob b = item.Value;
Console.WriteLine("{0} | Centroid:{1} Area:{2}", item.Key, b.Centroid, b.Area);
CvContourChainCode cc = b.Contour;
cc.Render(imgContour);
CvContourPolygon polygon = cc.ConvertToPolygon();
foreach (CvPoint p in polygon)
{
imgPolygon.Circle(p, 1, CvColor.Red, -1);
}
/*
CvPoint2D32f circleCenter;
float circleRadius;
GetEnclosingCircle(polygon, out circleCenter, out circleRadius);
imgPolygon.Circle(circleCenter, (int) circleRadius, CvColor.Green, 2);
*/
}
blobs.RenderBlobs(imgSrc, imgRender);
using (new CvWindow("render", imgRender))
using (new CvWindow("contour", imgContour))
using (new CvWindow("polygon vertices", imgPolygon))
{
Cv.WaitKey(0);
}
}
}
Example 9: CStyleMSER
/// <summary>
/// Extracts MSER by C-style code (cvExtractMSER)
/// </summary>
/// <param name="imgGray"></param>
/// <param name="imgRender"></param>
private void CStyleMSER(IplImage imgGray, IplImage imgDst)
{
using (CvMemStorage storage = new CvMemStorage())
{
CvContour[] contours;
CvMSERParams param = new CvMSERParams();
Cv.ExtractMSER(imgGray, null, out contours, storage, param);
foreach (CvContour c in contours)
{
CvColor color = CvColor.Random();
for (int i = 0; i < c.Total; i++)
{
imgDst.Circle(c[i].Value, 1, color);
}
}
}
}
Example 10: BlobOld
public BlobOld()
{
using (IplImage imgSrc = new IplImage(Const.ImageShapes, LoadMode.Color))
using (IplImage imgBinary = new IplImage(imgSrc.Size, BitDepth.U8, 1))
using (IplImage imgLabel = new IplImage(imgSrc.Size, BitDepth.F32, 1))
using (IplImage imgRender = new IplImage(imgSrc.Size, BitDepth.U8, 3))
using (IplImage imgContour = new IplImage(imgSrc.Size, BitDepth.U8, 3))
using (IplImage imgPolygon = new IplImage(imgSrc.Size, BitDepth.U8, 3))
{
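// Older cvblob API: label blobs into a separate label image (BitDepth.F32), render
// each blob's contour chain code, and mark its polygon vertices with small red circles.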
Cv.CvtColor(imgSrc, imgBinary, ColorConversion.BgrToGray);
Cv.Threshold(imgBinary, imgBinary, 100, 255, ThresholdType.Binary);
using (CvBlobs blobs = new CvBlobs())
{
uint result = blobs.Label(imgBinary, imgLabel);
foreach (KeyValuePair<uint, CvBlob> item in blobs)
{
CvBlob b = item.Value;
Console.WriteLine("{0} | Centroid:{1} Area:{2}", item.Key, b.Centroid, b.Area);
CvContourChainCode cc = b.Contour;
cc.RenderContourChainCode(imgContour);
CvContourPolygon polygon = cc.ConvertChainCodesToPolygon();
foreach (CvPoint p in polygon)
{
imgPolygon.Circle(p, 1, CvColor.Red, -1);
}
}
blobs.RenderBlobs(imgLabel, imgSrc, imgRender);
using (new CvWindow("render", imgRender))
using (new CvWindow("contour", imgContour))
using (new CvWindow("polygon vertices", imgPolygon))
{
Cv.WaitKey(0);
}
}
}
}
Example 11: SVM
//......... (part of the code is omitted here) .........
img.Line(new CvPoint(pts[i].X - 2, pts[i].Y - 2), new CvPoint(pts[i].X + 2, pts[i].Y + 2), new CvColor(255, 0, 0));
img.Line(new CvPoint(pts[i].X + 2, pts[i].Y - 2), new CvPoint(pts[i].X - 2, pts[i].Y + 2), new CvColor(255, 0, 0));
res[i] = 1;
}
else
{
if (pts[i].X > 200)
{
img.Line(new CvPoint(pts[i].X - 2, pts[i].Y - 2), new CvPoint(pts[i].X + 2, pts[i].Y + 2), new CvColor(0, 255, 0));
img.Line(new CvPoint(pts[i].X + 2, pts[i].Y - 2), new CvPoint(pts[i].X - 2, pts[i].Y + 2), new CvColor(0, 255, 0));
res[i] = 2;
}
else
{
img.Line(new CvPoint(pts[i].X - 2, pts[i].Y - 2), new CvPoint(pts[i].X + 2, pts[i].Y + 2), new CvColor(0, 0, 255));
img.Line(new CvPoint(pts[i].X + 2, pts[i].Y - 2), new CvPoint(pts[i].X - 2, pts[i].Y + 2), new CvColor(0, 0, 255));
res[i] = 3;
}
}
}
// (3) Show the training data
Cv.NamedWindow("SVM", WindowMode.AutoSize);
Cv.ShowImage("SVM", img);
Cv.WaitKey(0);
// (4) Prepare the training data
float[] data = new float[S * 2];
for (int i = 0; i < S; i++)
{
data[i * 2] = ((float)pts[i].X) / SIZE;
data[i * 2 + 1] = ((float)pts[i].Y) / SIZE;
}
// (5) Train the SVM
using (CvSVM svm = new CvSVM())
{
CvMat data_mat = new CvMat(S, 2, MatrixType.F32C1, data);
CvMat res_mat = new CvMat(S, 1, MatrixType.S32C1, res);
CvTermCriteria criteria = new CvTermCriteria(1000, float.Epsilon);
CvSVMParams param = new CvSVMParams(SVMType.CSvc, SVMKernelType.Rbf, 10.0, 8.0, 1.0, 10.0, 0.5, 0.1, null, criteria);
svm.Train(data_mat, res_mat, null, null, param);
// (6) Draw the training result (decision regions)
for (int i = 0; i < SIZE; i++)
{
for (int j = 0; j < SIZE; j++)
{
float[] a = { (float)j / SIZE, (float)i / SIZE };
CvMat m = new CvMat(1, 2, MatrixType.F32C1, a);
float ret = svm.Predict(m);
CvColor color = new CvColor();
switch ((int)ret)
{
case 1:
color = new CvColor(100, 0, 0); break;
case 2:
color = new CvColor(0, 100, 0); break;
case 3:
color = new CvColor(0, 0, 100); break;
}
img[i, j] = color;
}
}
// (7) Redraw the training data
for (int i = 0; i < S; i++)
{
CvColor color = new CvColor();
switch (res[i])
{
case 1:
color = new CvColor(255, 0, 0); break;
case 2:
color = new CvColor(0, 255, 0); break;
case 3:
color = new CvColor(0, 0, 255); break;
}
img.Line(new CvPoint(pts[i].X - 2, pts[i].Y - 2), new CvPoint(pts[i].X + 2, pts[i].Y + 2), color);
img.Line(new CvPoint(pts[i].X + 2, pts[i].Y - 2), new CvPoint(pts[i].X - 2, pts[i].Y + 2), color);
}
// (8) Draw the support vectors
int sv_num = svm.GetSupportVectorCount();
for (int i = 0; i < sv_num; i++)
{
var support = svm.GetSupportVector(i);
img.Circle(new CvPoint((int)(support[0] * SIZE), (int)(support[1] * SIZE)), 5, new CvColor(200, 200, 200));
}
// (9) Show the image
Cv.NamedWindow("SVM", WindowMode.AutoSize);
Cv.ShowImage("SVM", img);
Cv.WaitKey(0);
Cv.DestroyWindow("SVM");
}
}
}
Example 12: FaceDetect
public void FaceDetect()
{
// CvHaarClassifierCascade, cvHaarDetectObjects
// Use a cascade of Haar classifiers to detect faces
CvColor[] colors = new CvColor[]{
new CvColor(0,0,255),
new CvColor(0,128,255),
new CvColor(0,255,255),
new CvColor(0,255,0),
new CvColor(255,128,0),
new CvColor(255,255,0),
new CvColor(255,0,0),
new CvColor(255,0,255),
};
const double Scale = 1.14;
const double ScaleFactor = 1.0850;
const int MinNeighbors = 2;
using (IplImage img = new IplImage(Application.dataPath + TestImageName, LoadMode.Color))
using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / Scale), Cv.Round(img.Height / Scale)), BitDepth.U8, 1))
{
// Prepare the image used for face detection
using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
{
Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
Cv.Resize(gray, smallImg, Interpolation.Linear);
Cv.EqualizeHist(smallImg, smallImg);
}
//using (CvHaarClassifierCascade cascade = Cv.Load<CvHaarClassifierCascade>(Const.XmlHaarcascade)) // either way works
using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile(Application.dataPath + TestTextName)) //
using (CvMemStorage storage = new CvMemStorage())
{
storage.Clear();
// Detect faces
Stopwatch watch = Stopwatch.StartNew();
CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));
watch.Stop();
// Console.WriteLine("detection time = {0}ms\n", watch.ElapsedMilliseconds);
UnityEngine.Debug.Log("detection time = " + watch.ElapsedMilliseconds + " ms");
int i=0;
for (i = 0; i < faces.Total; i++)
{
CvRect r = faces[i].Value.Rect;
CvPoint center = new CvPoint
{
X = Cv.Round((r.X + r.Width * 0.5) * Scale),
Y = Cv.Round((r.Y + r.Height * 0.5) * Scale)
};
int radius = Cv.Round((r.Width + r.Height) * 0.25 * Scale);
img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
}
}
// Show in a window
CvWindow.ShowImages(img);
}
}
Example 13: DrawSubdivPoint
/// <summary>
/// Draws a point of the planar subdivision as a small filled circle
/// </summary>
/// <param name="img"></param>
/// <param name="fp"></param>
/// <param name="color"></param>
private void DrawSubdivPoint(IplImage img, CvPoint2D32f fp, CvColor color)
{
img.Circle(fp, 3, color, Cv.FILLED, LineType.AntiAlias, 0);
}
Example 14: MDS
/// <summary>
/// Classical Multidimensional Scaling
/// </summary>
public MDS()
{
// creates distance matrix
int size = CityDistance.GetLength(0);
CvMat t = new CvMat(size, size, MatrixType.F64C1, CityDistance);
// adds Torgerson's additive constant to t
t += Torgerson(t);
// squares all elements of t
t.Mul(t, t);
// centering matrix G
CvMat g = CenteringMatrix(size);
// calculates inner product matrix B
CvMat b = g * t * g.T() * -0.5;
// calculates eigenvalues and eigenvectors of B
CvMat vectors = new CvMat(size, size, MatrixType.F64C1);
CvMat values = new CvMat(size, 1, MatrixType.F64C1);
Cv.EigenVV(b, vectors, values);
for (int r = 0; r < values.Rows; r++)
{
if (values[r] < 0)
values[r] = 0;
}
// multiplies sqrt(eigenvalue) by eigenvector
CvMat result = vectors.GetRows(0, 2);
for (int r = 0; r < result.Rows; r++)
{
for (int c = 0; c < result.Cols; c++)
{
result[r, c] *= Math.Sqrt(values[r]);
}
}
// scaling
Cv.Normalize(result, result, 0, 800, NormType.MinMax);
//Console.WriteLine(vectors);
//Console.WriteLine(values);
//Console.WriteLine(result);
// opens a window
using (IplImage img = new IplImage(800, 600, BitDepth.U8, 3))
using (CvFont font = new CvFont(FontFace.HersheySimplex, 0.5f, 0.5f))
using (CvWindow window = new CvWindow("City Location Estimation"))
{
img.Zero();
for (int c = 0; c < size; c++)
{
double x = result[0, c];
double y = result[1, c];
x = x * 0.7 + img.Width * 0.1;
y = y * 0.7 + img.Height * 0.1;
img.Circle((int)x, (int)y, 5, CvColor.Red, -1);
img.PutText(CityNames[c], new CvPoint((int)x+5, (int)y+10), font, CvColor.White);
}
window.Image = img;
Cv.WaitKey();
}
}