本文整理汇总了C#中OpenCvSharp.IplImage类的典型用法代码示例。如果您正苦于以下问题:C# IplImage类的具体用法?C# IplImage怎么用?C# IplImage使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
IplImage类属于OpenCvSharp命名空间,在下文中一共展示了IplImage类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: GetSub
/// <summary>
/// Extracts a deep copy of the given sub-rectangle from the source image.
/// The source's ROI is temporarily narrowed to <paramref name="subRect"/>
/// and is always restored, even on failure.
/// </summary>
public static OpenCvSharp.IplImage GetSub(this OpenCvSharp.IplImage ipl, OpenCvSharp.CvRect subRect)
{
    if (ipl == null)
        throw new ArgumentNullException("ipl", "ipl is null.");

    CvRect bounds = new CvRect(0, 0, ipl.Width, ipl.Height);
    if (!bounds.Contains(subRect))
        throw new InvalidOperationException("subRect is outside of ipl");

    try
    {
        ipl.SetROI(subRect);
        // With the ROI set, GetSize() reports the sub-rectangle's size.
        var sub = new IplImage(ipl.GetSize(), ipl.Depth, ipl.NChannels);
        ipl.Copy(sub);
        return sub;
    }
    finally
    {
        // Restore the caller's view of the source image.
        ipl.ResetROI();
    }
}
示例2: Canny
/// <summary>
/// Runs Canny edge detection on an uploaded image and exposes the edge,
/// original and grayscale images to the view as base64-encoded PNG strings.
/// When no file was posted, the view is returned without any image data.
/// </summary>
public ActionResult Canny(HttpPostedFileBase imageData)
{
    if (imageData == null)
        return View();

    using (var image = IplImage.FromStream(imageData.InputStream, LoadMode.Color))
    using (var grayImage = new IplImage(image.Size, BitDepth.U8, 1))
    using (var cannyImage = new IplImage(image.Size, BitDepth.U8, 1))
    {
        Cv.CvtColor(image, grayImage, ColorConversion.BgrToGray);
        // Hysteresis thresholds 60 (low) / 180 (high).
        Cv.Canny(grayImage, cannyImage, 60, 180);

        ViewBag.Base64Image = Convert.ToBase64String(cannyImage.ToBytes(".png"));
        ViewBag.Base64OrgImage = Convert.ToBase64String(image.ToBytes(".png"));
        ViewBag.Base64GrayImage = Convert.ToBase64String(grayImage.ToBytes(".png"));
    }
    return View();
}
示例3: SetSize
/// <summary>
/// Resizes the form to fit the image with a 10-pixel margin on each axis,
/// then displays the image in the picture box.
/// </summary>
public void SetSize(IplImage i)
{
    int width = i.Size.Width + 10;
    int height = i.Size.Height + 10;
    this.Size = new Size(width, height);
    this.pictureBoxIpl1.ImageIpl = i;
}
示例4: DrawToHdc
/// <summary>
/// Draws a cropped region of an image onto a GDI device context, annotates
/// it with GDI+ text and an ellipse, and shows source and result windows.
/// </summary>
public DrawToHdc()
{
    CvRect roi = new CvRect(320, 260, 100, 100); // region of roosevelt's face
    using (IplImage src = new IplImage(Const.ImageYalta, LoadMode.Color))
    using (IplImage dst = new IplImage(roi.Size, BitDepth.U8, 3))
    {
        src.ROI = roi;
        using (Bitmap bitmap = new Bitmap(roi.Width, roi.Height, PixelFormat.Format32bppArgb))
        using (Graphics g = Graphics.FromImage(bitmap))
        {
            // Render the ROI of src directly into the bitmap's device context.
            IntPtr hdc = g.GetHdc();
            try
            {
                BitmapConverter.DrawToHdc(src, hdc, new CvRect(new CvPoint(0, 0), roi.Size));
            }
            finally
            {
                // The HDC must be released before any other Graphics call.
                g.ReleaseHdc(hdc);
            }
            // Font and Pen wrap native GDI handles and must be disposed
            // (the original leaked both). Brushes.Red is a shared system
            // brush and must NOT be disposed.
            using (Font font = new Font(FontFamily.GenericSerif, 12))
            using (Pen pen = new Pen(Color.Red, 4))
            {
                g.DrawString("Roosevelt", font, Brushes.Red, 20, 0);
                g.DrawEllipse(pen, new Rectangle(20, 20, roi.Width / 2, roi.Height / 2));
            }
            dst.CopyFrom(bitmap);
        }
        src.ResetROI();
        using (new CvWindow("src", src))
        using (new CvWindow("dst", dst))
        {
            Cv.WaitKey();
        }
    }
}
示例5: CornerDetect
public CornerDetect()
{
    // cvGoodFeaturesToTrack, cvFindCornerSubPix
    // Corner (feature point) detection in an image: runs two detection
    // passes (min-eigenvalue measure, then Harris measure), refines each
    // corner set to sub-pixel accuracy, and shows both results.
    int cornerCount = 150;
    using (IplImage dstImg1 = new IplImage(Const.ImageLenna, LoadMode.AnyColor | LoadMode.AnyDepth))
    using (IplImage dstImg2 = dstImg1.Clone())
    using (IplImage srcImgGray = new IplImage(Const.ImageLenna, LoadMode.GrayScale))
    using (IplImage eigImg = new IplImage(srcImgGray.GetSize(), BitDepth.F32, 1))
    using (IplImage tempImg = new IplImage(srcImgGray.GetSize(), BitDepth.F32, 1))
    {
        CvPoint2D32f[] corners;
        // (1) Corner detection using cvCornerMinEigenVal.
        //     cornerCount is ref: updated to the number actually found.
        Cv.GoodFeaturesToTrack(srcImgGray, eigImg, tempImg, out corners, ref cornerCount, 0.1, 15);
        Cv.FindCornerSubPix(srcImgGray, corners, cornerCount, new CvSize(3, 3), new CvSize(-1, -1), new CvTermCriteria(20, 0.03));
        // (2) Draw the detected corners.
        for (int i = 0; i < cornerCount; i++)
            Cv.Circle(dstImg1, corners[i], 3, new CvColor(255, 0, 0), 2);
        // (3) Corner detection using cvCornerHarris
        //     (reset the cap because pass 1 overwrote cornerCount).
        cornerCount = 150;
        Cv.GoodFeaturesToTrack(srcImgGray, eigImg, tempImg, out corners, ref cornerCount, 0.1, 15, null, 3, true, 0.01);
        Cv.FindCornerSubPix(srcImgGray, corners, cornerCount, new CvSize(3, 3), new CvSize(-1, -1), new CvTermCriteria(20, 0.03));
        // (4) Draw the detected corners.
        for (int i = 0; i < cornerCount; i++)
            Cv.Circle(dstImg2, corners[i], 3, new CvColor(0, 0, 255), 2);
        // (5) Display both result images until a key is pressed.
        using (new CvWindow("EigenVal", WindowMode.AutoSize, dstImg1))
        using (new CvWindow("Harris", WindowMode.AutoSize, dstImg2))
        {
            Cv.WaitKey(0);
        }
    }
}
示例6: SaveImage
/// <summary>
/// Saves the same image at several JPEG quality levels, then reloads each
/// result and displays it in its own window so the compression artifacts
/// can be compared side by side.
/// </summary>
public SaveImage()
{
    int[] qualities = { 0, 25, 50, 75, 100 };
    using (IplImage img = new IplImage(Const.Image16bit, LoadMode.Color))
    {
        // JPEG quality test: write q000.jpg .. q100.jpg.
        foreach (int q in qualities)
            img.SaveImage(string.Format("q{0:D3}.jpg", q), new JpegEncodingParam(q));

        IplImage[] loaded = new IplImage[qualities.Length];
        CvWindow[] windows = new CvWindow[qualities.Length];
        try
        {
            for (int n = 0; n < qualities.Length; n++)
            {
                loaded[n] = new IplImage(string.Format("q{0:D3}.jpg", qualities[n]), LoadMode.Color);
                windows[n] = new CvWindow("quality " + qualities[n], loaded[n]);
            }
            Cv.WaitKey();
        }
        finally
        {
            foreach (CvWindow w in windows)
                if (w != null) w.Dispose();
            foreach (IplImage i in loaded)
                if (i != null) i.Dispose();
        }
    }
}
示例7: ConvertToBitmapSource
public ConvertToBitmapSource()
{
    BitmapSource bs = null;
    // Image processing with OpenCV (Gaussian smoothing + Otsu threshold).
    using (IplImage src = new IplImage(Const.ImageLenna, LoadMode.GrayScale))
    using (IplImage dst = new IplImage(src.Size, BitDepth.U8, 1))
    {
        src.Smooth(src, SmoothType.Gaussian, 5);
        src.Threshold(dst, 0, 255, ThresholdType.Otsu);
        // IplImage -> BitmapSource (bs stays valid after dst is disposed).
        bs = dst.ToBitmapSource();
        //bs = BitmapSourceConverter.ToBitmapSource(dst);
    }
    // Display the result in a WPF Window.
    Image image = new Image { Source = bs };
    Window window = new Window
    {
        Title = "from IplImage to BitmapSource",
        Width = bs.PixelWidth,
        Height = bs.PixelHeight,
        Content = image
    };
    // NOTE(review): Application.Run blocks until the window closes; this
    // assumes no other WPF Application exists in the process — confirm.
    Application app = new Application();
    app.Run(window);
}
示例8: CornerDetect
/// <summary>
/// Detects up to 150 corners twice — once with the min-eigenvalue measure
/// and once with the Harris measure — refines them to sub-pixel accuracy,
/// and displays both annotated results.
/// </summary>
public CornerDetect()
{
    int maxCorners = 150;
    using (IplImage eigenResult = new IplImage(FilePath.Image.Lenna, LoadMode.AnyColor | LoadMode.AnyDepth))
    using (IplImage harrisResult = eigenResult.Clone())
    using (IplImage gray = new IplImage(FilePath.Image.Lenna, LoadMode.GrayScale))
    using (IplImage eigBuffer = new IplImage(gray.GetSize(), BitDepth.F32, 1))
    using (IplImage tmpBuffer = new IplImage(gray.GetSize(), BitDepth.F32, 1))
    {
        CvPoint2D32f[] points;

        // Pass 1: min-eigenvalue corner detection (cvCornerMinEigenVal).
        Cv.GoodFeaturesToTrack(gray, eigBuffer, tmpBuffer, out points, ref maxCorners, 0.1, 15);
        Cv.FindCornerSubPix(gray, points, maxCorners, new CvSize(3, 3), new CvSize(-1, -1), new CvTermCriteria(20, 0.03));
        for (int idx = 0; idx < maxCorners; idx++)
        {
            Cv.Circle(eigenResult, points[idx], 3, new CvColor(255, 0, 0), 2);
        }

        // Pass 2: Harris corner detection (reset the cap first — pass 1
        // overwrote maxCorners with the number actually found).
        maxCorners = 150;
        Cv.GoodFeaturesToTrack(gray, eigBuffer, tmpBuffer, out points, ref maxCorners, 0.1, 15, null, 3, true, 0.01);
        Cv.FindCornerSubPix(gray, points, maxCorners, new CvSize(3, 3), new CvSize(-1, -1), new CvTermCriteria(20, 0.03));
        for (int idx = 0; idx < maxCorners; idx++)
        {
            Cv.Circle(harrisResult, points[idx], 3, new CvColor(0, 0, 255), 2);
        }

        using (new CvWindow("EigenVal", WindowMode.AutoSize, eigenResult))
        using (new CvWindow("Harris", WindowMode.AutoSize, harrisResult))
        {
            Cv.WaitKey(0);
        }
    }
}
示例9: Squares
/// <summary>
/// Finds and draws squares in each test image in turn; pressing ESC
/// aborts the loop early.
/// </summary>
public Squares()
{
    // Memory storage holds all the dynamic data produced by the detector.
    // It wraps native memory and is now disposed deterministically
    // (the original never disposed it).
    using (CvMemStorage storage = new CvMemStorage(0))
    {
        for (int i = 0; i < _names.Length; i++)
        {
            // load i-th image
            using (IplImage img = new IplImage(_names[i], LoadMode.Color))
            {
                // create window and a trackbar (slider) with parent "image" and set callback
                // (the slider regulates upper threshold, passed to Canny edge detector)
                Cv.NamedWindow(WindowName, WindowMode.AutoSize);
                // find and draw the squares
                DrawSquares(img, FindSquares4(img, storage));
            }
            // clear memory storage - reset free space position
            storage.Clear();
            // cvWaitKey also takes care of HighGUI event processing
            int c = Cv.WaitKey(0);
            if ((char)c == 27) // ESC aborts
                break;
        }
        Cv.DestroyWindow(WindowName);
    }
}
示例10: Undistort
/// <summary>
/// Corrects lens distortion using intrinsic and distortion parameters
/// loaded from a camera calibration file, and shows both images.
/// </summary>
public Undistort()
{
    using (IplImage srcImg = new IplImage(FilePath.Image.Distortion, LoadMode.Color))
    using (IplImage dstImg = srcImg.Clone())
    {
        CvMat intrinsic = null;
        CvMat distortion = null;
        try
        {
            // Read the calibration parameters from the storage file.
            using (CvFileStorage fs = new CvFileStorage(FilePath.Text.Camera, null, FileStorageMode.Read))
            {
                CvFileNode param = fs.GetFileNodeByName(null, "intrinsic");
                intrinsic = fs.Read<CvMat>(param);
                param = fs.GetFileNodeByName(null, "distortion");
                distortion = fs.Read<CvMat>(param);
            }
            Cv.Undistort2(srcImg, dstImg, intrinsic, distortion);
            using (new CvWindow("Distortion", WindowMode.AutoSize, srcImg))
            using (new CvWindow("Undistortion", WindowMode.AutoSize, dstImg))
            {
                CvWindow.WaitKey(0);
            }
        }
        finally
        {
            // The original disposed these only on the success path;
            // release them even when an exception is thrown.
            if (intrinsic != null) intrinsic.Dispose();
            if (distortion != null) distortion.Dispose();
        }
    }
}
示例11: Undistort
public Undistort()
{
    // cvUndistort2
    // Corrects lens distortion using camera calibration data.
    // (1) Load the image to be corrected.
    using (IplImage srcImg = new IplImage(Const.ImageDistortion, LoadMode.Color))
    using (IplImage dstImg = srcImg.Clone())
    {
        // (2) Read the parameter file (intrinsic matrix + distortion coefficients).
        CvMat intrinsic, distortion;
        using (CvFileStorage fs = new CvFileStorage(Const.XmlCamera, null, FileStorageMode.Read))
        {
            CvFileNode param = fs.GetFileNodeByName(null, "intrinsic");
            intrinsic = fs.Read<CvMat>(param);
            param = fs.GetFileNodeByName(null, "distortion");
            distortion = fs.Read<CvMat>(param);
        }
        // (3) Undistort srcImg into dstImg.
        Cv.Undistort2(srcImg, dstImg, intrinsic, distortion);
        // (4) Display both images; exit when a key is pressed.
        using (CvWindow w1 = new CvWindow("Distortion", WindowMode.AutoSize, srcImg))
        using (CvWindow w2 = new CvWindow("Undistortion", WindowMode.AutoSize, dstImg))
        {
            CvWindow.WaitKey(0);
        }
        // NOTE(review): these are disposed only on the success path — an
        // exception above would leak the native matrices; consider try/finally.
        intrinsic.Dispose();
        distortion.Dispose();
    }
}
示例12: ShowCvWindow
/// <summary>
/// Shows the image in a HighGUI window, blocks until a key is pressed,
/// then destroys the window.
/// </summary>
private static void ShowCvWindow(IplImage image)
{
    const string title = "window";
    Cv.NamedWindow(title);
    Cv.ShowImage(title, image);
    Cv.WaitKey();
    Cv.DestroyWindow(title);
}
示例13: Filter2D
/// <summary>
/// cvFilter2D sample: filtering with a user-defined 1x21 horizontal kernel.
/// </summary>
public Filter2D()
{
    // (1) Load the image.
    using (IplImage srcImg = new IplImage(Const.ImageFruits, LoadMode.AnyDepth | LoadMode.AnyColor))
    using (IplImage dstImg = new IplImage(srcImg.Size, srcImg.Depth, srcImg.NChannels))
    {
        // (2) Normalize the kernel (L1 norm = 1) and apply the filter.
        float[] data = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
        };
        // CvMat wraps native memory; dispose it deterministically
        // (the original never disposed the kernel).
        using (CvMat kernel = new CvMat(1, 21, MatrixType.F32C1, data))
        {
            Cv.Normalize(kernel, kernel, 1.0, 0, NormType.L1);
            Cv.Filter2D(srcImg, dstImg, kernel, new CvPoint(0, 0));
            // (3) Show the result until a key is pressed.
            using (CvWindow window = new CvWindow("Filter2D", dstImg))
            {
                Cv.WaitKey(0);
            }
        }
    }
}
示例14: Resize
/// <summary>
/// cvResize sample: scales the input image to twice its size using four
/// different interpolation methods and displays all results.
/// </summary>
public Resize()
{
    // (1) Load the source image.
    using (IplImage original = new IplImage(Const.ImageSquare5, LoadMode.AnyColor | LoadMode.AnyDepth))
    {
        // (2) Allocate output images at double the source dimensions.
        CvSize doubled = new CvSize(original.Width * 2, original.Height * 2);
        using (IplImage nearest = new IplImage(doubled, original.Depth, original.NChannels))
        using (IplImage cubic = new IplImage(doubled, original.Depth, original.NChannels))
        using (IplImage linear = new IplImage(doubled, original.Depth, original.NChannels))
        using (IplImage lanczos = new IplImage(doubled, original.Depth, original.NChannels))
        {
            // (3) Resize with each interpolation method.
            Cv.Resize(original, nearest, Interpolation.NearestNeighbor);
            Cv.Resize(original, cubic, Interpolation.Cubic);
            Cv.Resize(original, linear, Interpolation.Linear);
            Cv.Resize(original, lanczos, Interpolation.Lanczos4);

            // (4) Show everything until a key is pressed.
            using (new CvWindow("src", original))
            using (new CvWindow("dst NearestNeighbor", nearest))
            using (new CvWindow("dst Cubic", cubic))
            using (new CvWindow("dst Linear", linear))
            using (new CvWindow("dst Lanczos4", lanczos))
            {
                Cv.WaitKey();
            }
        }
    }
}
示例15: FaceDetect
public FaceDetect()
{
    CheckMemoryLeak();
    // CvHaarClassifierCascade, cvHaarDetectObjects:
    // detect faces on a downscaled grayscale copy, then draw circles on
    // the full-size color image (coordinates scaled back up by Scale).
    CvColor[] colors = new CvColor[]{
        new CvColor(0,0,255),
        new CvColor(0,128,255),
        new CvColor(0,255,255),
        new CvColor(0,255,0),
        new CvColor(255,128,0),
        new CvColor(255,255,0),
        new CvColor(255,0,0),
        new CvColor(255,0,255),
    };
    const double Scale = 1.14;          // detection runs on img / Scale
    const double ScaleFactor = 1.0850;  // cascade window growth per pass
    const int MinNeighbors = 2;         // min overlapping detections to keep
    using (IplImage img = new IplImage(FilePath.Image.Yalta, LoadMode.Color))
    using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / Scale), Cv.Round(img.Height / Scale)), BitDepth.U8, 1))
    {
        // Build the detection input: grayscale, downscaled, equalized.
        using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
        {
            Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
            Cv.Resize(gray, smallImg, Interpolation.Linear);
            Cv.EqualizeHist(smallImg, smallImg);
        }
        using (var cascade = CvHaarClassifierCascade.FromFile(FilePath.Text.HaarCascade))
        using (var storage = new CvMemStorage())
        {
            storage.Clear();
            // Face detection (timed).
            Stopwatch watch = Stopwatch.StartNew();
            CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));
            watch.Stop();
            Console.WriteLine("detection time = {0}ms\n", watch.ElapsedMilliseconds);
            // Circle each detected face on the original image, cycling
            // through the 8 colors; coordinates are scaled back up.
            for (int i = 0; i < faces.Total; i++)
            {
                CvRect r = faces[i].Value.Rect;
                CvPoint center = new CvPoint
                {
                    X = Cv.Round((r.X + r.Width * 0.5) * Scale),
                    Y = Cv.Round((r.Y + r.Height * 0.5) * Scale)
                };
                int radius = Cv.Round((r.Width + r.Height) * 0.25 * Scale);
                img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
            }
        }
        // Show the annotated image.
        CvWindow.ShowImages(img);
    }
}