本文整理汇总了C#中OpenCvSharp.IplImage.Clone方法的典型用法代码示例。如果您正苦于以下问题:C# IplImage.Clone方法的具体用法?C# IplImage.Clone怎么用?C# IplImage.Clone使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类OpenCvSharp.IplImage的用法示例。
在下文中一共展示了IplImage.Clone方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: PreProcess
// Pre-processes the source image field `src` for later segmentation:
// grayscale -> Gaussian blur -> top-hat morphology -> Otsu threshold
// -> median smoothing -> dilation.
// Side effects: allocates and assigns the `timg` and `pimg` fields
// (pimg keeps an untouched grayscale copy; timg holds the thresholded image).
// NOTE(review): assumes `src` has an alpha channel (RgbaToGray) — confirm.
public void PreProcess()
{
    // 21x3 rectangular kernel, anchor (10, 2): wide and short, which favors
    // horizontally elongated structures. Fix: the kernel wraps a native
    // resource and was previously never released — dispose it via using.
    using (IplConvKernel element = Cv.CreateStructuringElementEx(21, 3, 10, 2, ElementShape.Rect, null))
    {
        timg = new IplImage(src.Size, BitDepth.U8, 1);
        // Fix: temp/dest were released with Cv.ReleaseImage only on the
        // success path; using guarantees disposal even on exceptions.
        using (IplImage temp = timg.Clone())
        using (IplImage dest = timg.Clone())
        {
            src.CvtColor(timg, ColorConversion.RgbaToGray);
            pimg = timg.Clone();
            Cv.Smooth(timg, timg, SmoothType.Gaussian);
            // Top-hat highlights bright details smaller than the kernel.
            Cv.MorphologyEx(timg, dest, temp, element, MorphologyOperation.TopHat, 1);
            Cv.Threshold(dest, timg, 180, 255, ThresholdType.Binary | ThresholdType.Otsu);
            Cv.Smooth(timg, dest, SmoothType.Median);
            Cv.Dilate(dest, dest, element, 2);
            // NOTE(review): the dilated result in `dest` is discarded here
            // (as in the original code) — only `timg` survives. Confirm the
            // dilation is intentionally display-only.
        }
    }
}
示例2: Watershed
// Interactive watershed segmentation demo (cvWatershed).
// The user clicks in the "image" window to drop circular seed markers;
// watershed grows each marker along the image gradient, and the resulting
// region boundaries are painted red on a copy of the source image.
public Watershed()
{
// cvWatershed
// The mouse specifies the centers of circular markers (seed regions); several markers can be set.
// Each marker is grown along the image gradient, and region boundaries form where the gradient is high.
// The image is split into as many regions as markers were placed.
// (2) Load the image, initialize the marker image, and allocate the image used for display.
using (IplImage srcImg = new IplImage(Const.ImageGoryokaku, LoadMode.AnyDepth | LoadMode.AnyColor))
using (IplImage dstImg = srcImg.Clone())
using (IplImage dspImg = srcImg.Clone())
using (IplImage markers = new IplImage(srcImg.Size, BitDepth.S32, 1))
{
markers.Zero();
// (3) Show the input image and register a mouse callback for choosing seed components.
using (CvWindow wImage = new CvWindow("image", WindowMode.AutoSize))
{
wImage.Image = srcImg;
// Each left click sets a circular seed region; seedNum gives every seed a distinct label.
int seedNum = 0;
wImage.OnMouseCallback += delegate(MouseEvent ev, int x, int y, MouseEvent flags)
{
if (ev == MouseEvent.LButtonDown)
{
seedNum++;
CvPoint pt = new CvPoint(x, y);
markers.Circle(pt, 20, CvScalar.ScalarAll(seedNum), Cv.FILLED, LineType.Link8, 0);
dspImg.Circle(pt, 20, CvColor.White, 3, LineType.Link8, 0);
wImage.Image = dspImg;
}
};
CvWindow.WaitKey();
}
// (4) Run the watershed segmentation.
Cv.Watershed(srcImg, markers);
// (5) Paint the watershed boundaries (marker pixel value == -1) red on the result image.
for (int i = 0; i < markers.Height; i++)
{
for (int j = 0; j < markers.Width; j++)
{
int idx = (int)(markers.Get2D(i, j).Val0);
if (idx == -1)
{
dstImg.Set2D(i, j, CvColor.Red);
}
}
}
using (CvWindow wDst = new CvWindow("watershed transform", WindowMode.AutoSize))
{
wDst.Image = dstImg;
CvWindow.WaitKey();
}
}
}
示例3: Watershed
// Interactive watershed segmentation: left-clicks in the "image" window
// drop circular seed markers, cvWatershed grows them, and the resulting
// boundaries (marker value -1) are painted red on a copy of the source.
public Watershed()
{
    using (IplImage source = new IplImage(FilePath.Image.Goryokaku, LoadMode.AnyDepth | LoadMode.AnyColor))
    using (IplImage result = source.Clone())
    using (IplImage display = source.Clone())
    using (IplImage markerImg = new IplImage(source.Size, BitDepth.S32, 1))
    {
        markerImg.Zero();
        int seedCount = 0;
        using (CvWindow imageWindow = new CvWindow("image", WindowMode.AutoSize))
        {
            imageWindow.Image = source;
            // Each left click adds a seed with a fresh, unique label.
            imageWindow.OnMouseCallback += delegate(MouseEvent ev, int x, int y, MouseEvent flags)
            {
                if (ev != MouseEvent.LButtonDown)
                    return;
                seedCount++;
                CvPoint center = new CvPoint(x, y);
                markerImg.Circle(center, 20, CvScalar.ScalarAll(seedCount), Cv.FILLED, LineType.Link8, 0);
                display.Circle(center, 20, CvColor.White, 3, LineType.Link8, 0);
                imageWindow.Image = display;
            };
            CvWindow.WaitKey();
        }

        Cv.Watershed(source, markerImg);

        // Color every watershed boundary pixel (label == -1) red.
        for (int row = 0; row < markerImg.Height; row++)
        {
            for (int col = 0; col < markerImg.Width; col++)
            {
                if ((int)markerImg.Get2D(row, col).Val0 == -1)
                {
                    result.Set2D(row, col, CvColor.Red);
                }
            }
        }

        using (CvWindow resultWindow = new CvWindow("watershed transform", WindowMode.AutoSize))
        {
            resultWindow.Image = result;
            CvWindow.WaitKey();
        }
    }
}
示例4: Morphology
// Applies the basic morphology operations (dilate, erode, open, close,
// gradient, top-hat, black-hat) with a 9x9 rectangular structuring
// element and shows every result alongside the source image.
public Morphology()
{
    using (IplImage srcImg = new IplImage(FilePath.Image.Lenna, LoadMode.AnyDepth | LoadMode.AnyColor))
    using (IplImage dstImgDilate = srcImg.Clone())
    using (IplImage dstImgErode = srcImg.Clone())
    using (IplImage dstImgOpening = srcImg.Clone())
    using (IplImage dstImgClosing = srcImg.Clone())
    using (IplImage dstImgGradient = srcImg.Clone())
    using (IplImage dstImgTophat = srcImg.Clone())
    using (IplImage dstImgBlackhat = srcImg.Clone())
    using (IplImage tmpImg = srcImg.Clone())
    // Fix: the structuring element wraps a native resource and was
    // previously never released — dispose it together with the images.
    using (IplConvKernel element = Cv.CreateStructuringElementEx(9, 9, 4, 4, ElementShape.Rect, null))
    {
        Cv.Dilate(srcImg, dstImgDilate, element, 1);
        Cv.Erode(srcImg, dstImgErode, element, 1);
        // tmpImg serves as the scratch buffer MorphologyEx requires for
        // the compound operations.
        Cv.MorphologyEx(srcImg, dstImgOpening, tmpImg, element, MorphologyOperation.Open, 1);
        Cv.MorphologyEx(srcImg, dstImgClosing, tmpImg, element, MorphologyOperation.Close, 1);
        Cv.MorphologyEx(srcImg, dstImgGradient, tmpImg, element, MorphologyOperation.Gradient, 1);
        Cv.MorphologyEx(srcImg, dstImgTophat, tmpImg, element, MorphologyOperation.TopHat, 1);
        Cv.MorphologyEx(srcImg, dstImgBlackhat, tmpImg, element, MorphologyOperation.BlackHat, 1);
        using (new CvWindow("src", srcImg))
        using (new CvWindow("dilate", dstImgDilate))
        using (new CvWindow("erode", dstImgErode))
        using (new CvWindow("opening", dstImgOpening))
        using (new CvWindow("closing", dstImgClosing))
        using (new CvWindow("gradient", dstImgGradient))
        using (new CvWindow("tophat", dstImgTophat))
        using (new CvWindow("blackhat", dstImgBlackhat))
        {
            Cv.WaitKey(0);
        }
    }
}
示例5: Perspective
// Warps the Lenna image with a perspective transform computed from four
// point correspondences, then shows the source and warped images.
public Perspective()
{
    // Quadrilateral corners before and after the transform.
    CvPoint2D32f[] sourceQuad =
    {
        new CvPoint2D32f(150.0f, 150.0f),
        new CvPoint2D32f(150.0f, 300.0f),
        new CvPoint2D32f(350.0f, 300.0f),
        new CvPoint2D32f(350.0f, 150.0f),
    };
    CvPoint2D32f[] targetQuad =
    {
        new CvPoint2D32f(200.0f, 200.0f),
        new CvPoint2D32f(150.0f, 300.0f),
        new CvPoint2D32f(350.0f, 300.0f),
        new CvPoint2D32f(300.0f, 200.0f),
    };
    using (IplImage source = new IplImage(FilePath.Image.Lenna, LoadMode.AnyDepth | LoadMode.AnyColor))
    using (IplImage warped = source.Clone())
    using (CvMat transform = Cv.GetPerspectiveTransform(sourceQuad, targetQuad))
    {
        // Pixels mapped from outside the source are filled with value 100.
        Cv.WarpPerspective(source, warped, transform, Interpolation.Linear | Interpolation.FillOutliers, CvScalar.ScalarAll(100));
        using (new CvWindow("src", source))
        using (new CvWindow("dst", warped))
        {
            Cv.WaitKey(0);
        }
    }
}
示例6: LatentSVM
// Runs a latent-SVM cat detector over an image, times the detection,
// draws a red rectangle around every result, and displays the output.
public LatentSVM()
{
    using (var svmDetector = new CvLatentSvmDetector(FilePath.Text.LatentSvmCat))
    using (var source = new IplImage(FilePath.Image.Cat, LoadMode.Color))
    using (var annotated = source.Clone())
    using (var memStorage = new CvMemStorage())
    {
        Console.WriteLine("Running LatentSVM...");
        var stopwatch = Stopwatch.StartNew();
        // 0.5f / 2 are detector tuning parameters — see DetectObjects docs.
        CvSeq<CvObjectDetection> detections = svmDetector.DetectObjects(source, memStorage, 0.5f, 2);
        stopwatch.Stop();
        Console.WriteLine("Elapsed time: {0}ms", stopwatch.ElapsedMilliseconds);

        foreach (CvObjectDetection found in detections)
        {
            CvRect box = found.Rect;
            CvPoint topLeft = new CvPoint(box.X, box.Y);
            CvPoint bottomRight = new CvPoint(box.X + box.Width, box.Y + box.Height);
            annotated.Rectangle(topLeft, bottomRight, CvColor.Red, 3);
        }

        using (new CvWindow("LatentSVM result", annotated))
        {
            Cv.WaitKey();
        }
    }
}
示例7: Affine
// Affine warp demo (cvGetAffineTransform + cvWarpAffine): an affine
// matrix is computed from three point correspondences and applied to
// the whole image; source and result are then displayed.
public Affine()
{
    // Triangle vertices before and after the transform.
    CvPoint2D32f[] fromTriangle =
    {
        new CvPoint2D32f(200.0f, 200.0f),
        new CvPoint2D32f(250.0f, 200.0f),
        new CvPoint2D32f(200.0f, 100.0f),
    };
    CvPoint2D32f[] toTriangle =
    {
        new CvPoint2D32f(300.0f, 100.0f),
        new CvPoint2D32f(300.0f, 50.0f),
        new CvPoint2D32f(200.0f, 100.0f),
    };
    using (IplImage source = new IplImage(Const.ImageGoryokaku, LoadMode.AnyDepth | LoadMode.AnyColor))
    using (IplImage warped = source.Clone())
    using (CvMat transform = Cv.GetAffineTransform(fromTriangle, toTriangle))
    {
        // Pixels mapped from outside the source are filled with value 0.
        Cv.WarpAffine(source, warped, transform, Interpolation.Linear | Interpolation.FillOutliers, CvScalar.ScalarAll(0));
        using (new CvWindow("src", source))
        using (new CvWindow("dst", warped))
        {
            Cv.WaitKey(0);
        }
    }
}
示例8: CornerDetect
// Corner (feature point) detection demo: cvGoodFeaturesToTrack with
// sub-pixel refinement (cvFindCornerSubPix), run twice — once with the
// default min-eigenvalue measure and once with the Harris measure.
public CornerDetect()
{
// cvGoodFeaturesToTrack, cvFindCornerSubPix
// Detects corners (feature points) in an image.
int cornerCount = 150;
using (IplImage dstImg1 = new IplImage(Const.ImageLenna, LoadMode.AnyColor | LoadMode.AnyDepth))
using (IplImage dstImg2 = dstImg1.Clone())
using (IplImage srcImgGray = new IplImage(Const.ImageLenna, LoadMode.GrayScale))
using (IplImage eigImg = new IplImage(srcImgGray.GetSize(), BitDepth.F32, 1))
using (IplImage tempImg = new IplImage(srcImgGray.GetSize(), BitDepth.F32, 1))
{
CvPoint2D32f[] corners;
// (1) Corner detection using cvCornerMinEigenVal (the default quality measure).
Cv.GoodFeaturesToTrack(srcImgGray, eigImg, tempImg, out corners, ref cornerCount, 0.1, 15);
Cv.FindCornerSubPix(srcImgGray, corners, cornerCount, new CvSize(3, 3), new CvSize(-1, -1), new CvTermCriteria(20, 0.03));
// (2) Draw the detected corners.
for (int i = 0; i < cornerCount; i++)
Cv.Circle(dstImg1, corners[i], 3, new CvColor(255, 0, 0), 2);
// (3) Corner detection using cvCornerHarris (useHarris = true, k = 0.01).
cornerCount = 150;
Cv.GoodFeaturesToTrack(srcImgGray, eigImg, tempImg, out corners, ref cornerCount, 0.1, 15, null, 3, true, 0.01);
Cv.FindCornerSubPix(srcImgGray, corners, cornerCount, new CvSize(3, 3), new CvSize(-1, -1), new CvTermCriteria(20, 0.03));
// (4) Draw the detected corners.
for (int i = 0; i < cornerCount; i++)
Cv.Circle(dstImg2, corners[i], 3, new CvColor(0, 0, 255), 2);
// (5) Show both results side by side.
using (new CvWindow("EigenVal", WindowMode.AutoSize, dstImg1))
using (new CvWindow("Harris", WindowMode.AutoSize, dstImg2))
{
Cv.WaitKey(0);
}
}
}
示例9: CornerDetect
// Detects corners with cvGoodFeaturesToTrack twice — the default
// min-eigenvalue measure and the Harris measure — refines them to
// sub-pixel accuracy, and draws each set on its own copy of the image.
public CornerDetect()
{
    using (IplImage eigenResult = new IplImage(FilePath.Image.Lenna, LoadMode.AnyColor | LoadMode.AnyDepth))
    using (IplImage harrisResult = eigenResult.Clone())
    using (IplImage graySource = new IplImage(FilePath.Image.Lenna, LoadMode.GrayScale))
    using (IplImage eigenBuffer = new IplImage(graySource.GetSize(), BitDepth.F32, 1))
    using (IplImage workBuffer = new IplImage(graySource.GetSize(), BitDepth.F32, 1))
    {
        CvPoint2D32f[] corners;
        // Shared sub-pixel refinement parameters.
        CvSize searchWindow = new CvSize(3, 3);
        CvSize zeroZone = new CvSize(-1, -1);
        CvTermCriteria criteria = new CvTermCriteria(20, 0.03);

        // Pass 1: default (min-eigenvalue) corner measure.
        int cornerCount = 150;
        Cv.GoodFeaturesToTrack(graySource, eigenBuffer, workBuffer, out corners, ref cornerCount, 0.1, 15);
        Cv.FindCornerSubPix(graySource, corners, cornerCount, searchWindow, zeroZone, criteria);
        for (int n = 0; n < cornerCount; n++)
        {
            Cv.Circle(eigenResult, corners[n], 3, new CvColor(255, 0, 0), 2);
        }

        // Pass 2: Harris corner measure (useHarris = true, k = 0.01).
        cornerCount = 150;
        Cv.GoodFeaturesToTrack(graySource, eigenBuffer, workBuffer, out corners, ref cornerCount, 0.1, 15, null, 3, true, 0.01);
        Cv.FindCornerSubPix(graySource, corners, cornerCount, searchWindow, zeroZone, criteria);
        for (int n = 0; n < cornerCount; n++)
        {
            Cv.Circle(harrisResult, corners[n], 3, new CvColor(0, 0, 255), 2);
        }

        using (new CvWindow("EigenVal", WindowMode.AutoSize, eigenResult))
        using (new CvWindow("Harris", WindowMode.AutoSize, harrisResult))
        {
            Cv.WaitKey(0);
        }
    }
}
示例10: HoughCircles
// Finds circles with the Hough gradient method and draws each detected
// circle in red on a copy of the source, then shows both the grayscale
// working image and the annotated result.
public HoughCircles()
{
    using (IplImage source = new IplImage(Const.ImageWalkman, LoadMode.Color))
    using (IplImage gray = new IplImage(source.Size, BitDepth.U8, 1))
    using (IplImage annotated = source.Clone())
    {
        // Grayscale + a heavy Gaussian blur suppresses spurious detections.
        Cv.CvtColor(source, gray, ColorConversion.BgrToGray);
        Cv.Smooth(gray, gray, SmoothType.Gaussian, 9);
        using (CvMemStorage circleStorage = new CvMemStorage())
        {
            CvSeq<CvCircleSegment> circles = gray.HoughCircles(circleStorage, HoughCirclesMethod.Gradient, 1, 100, 150, 55, 0, 0);
            foreach (CvCircleSegment circle in circles)
            {
                annotated.Circle(circle.Center, (int)circle.Radius, CvColor.Red, 3);
            }
        }
        using (new CvWindow("gray", WindowMode.AutoSize, gray))
        using (new CvWindow("Hough circles", WindowMode.AutoSize, annotated))
        {
            CvWindow.WaitKey(0);
        }
    }
}
示例11: Undistort
// Corrects lens distortion (cvUndistort2) using intrinsic and distortion
// matrices loaded from the camera parameter file, then shows the original
// and corrected images side by side.
public Undistort()
{
    using (IplImage srcImg = new IplImage(FilePath.Image.Distortion, LoadMode.Color))
    using (IplImage dstImg = srcImg.Clone())
    {
        CvMat intrinsic, distortion;
        using (CvFileStorage fs = new CvFileStorage(FilePath.Text.Camera, null, FileStorageMode.Read))
        {
            CvFileNode param = fs.GetFileNodeByName(null, "intrinsic");
            intrinsic = fs.Read<CvMat>(param);
            param = fs.GetFileNodeByName(null, "distortion");
            distortion = fs.Read<CvMat>(param);
        }
        // Fix: dispose the matrices on all paths — the previous explicit
        // Dispose calls were skipped if an exception occurred above them.
        using (intrinsic)
        using (distortion)
        {
            Cv.Undistort2(srcImg, dstImg, intrinsic, distortion);
            using (new CvWindow("Distortion", WindowMode.AutoSize, srcImg))
            using (new CvWindow("Undistortion", WindowMode.AutoSize, dstImg))
            {
                CvWindow.WaitKey(0);
            }
        }
    }
}
示例12: Undistort
// cvUndistort2 demo: corrects lens distortion using calibration data
// read from an XML parameter file, then displays both images.
public Undistort()
{
    // (1) Load the image to be corrected.
    using (IplImage srcImg = new IplImage(Const.ImageDistortion, LoadMode.Color))
    using (IplImage dstImg = srcImg.Clone())
    {
        // (2) Read the intrinsic and distortion matrices from the parameter file.
        CvMat intrinsic, distortion;
        using (CvFileStorage fs = new CvFileStorage(Const.XmlCamera, null, FileStorageMode.Read))
        {
            CvFileNode param = fs.GetFileNodeByName(null, "intrinsic");
            intrinsic = fs.Read<CvMat>(param);
            param = fs.GetFileNodeByName(null, "distortion");
            distortion = fs.Read<CvMat>(param);
        }
        // Fix: dispose the matrices on all paths — the previous explicit
        // Dispose calls were skipped if an exception occurred before them.
        using (intrinsic)
        using (distortion)
        {
            // (3) Undistort.
            Cv.Undistort2(srcImg, dstImg, intrinsic, distortion);
            // (4) Show both images until a key is pressed.
            using (CvWindow w1 = new CvWindow("Distortion", WindowMode.AutoSize, srcImg))
            using (CvWindow w2 = new CvWindow("Undistortion", WindowMode.AutoSize, dstImg))
            {
                CvWindow.WaitKey(0);
            }
        }
    }
}
示例13: Perspective
// Perspective transform demo: computes a perspective matrix from four
// point correspondences (cvGetPerspectiveTransform) and applies it to
// the whole image (cvWarpPerspective), then displays both images.
public Perspective()
{
// cvGetPerspectiveTransform + cvWarpPerspective
// Computes the perspective transform matrix from 4 corresponding points and warps the whole image with it.
// (1) Load the image and allocate the output image.
using (IplImage srcImg = new IplImage(Const.ImageLenna, LoadMode.AnyDepth | LoadMode.AnyColor))
using (IplImage dstImg = srcImg.Clone())
{
// (2) Set the quadrilateral's corresponding vertices before and after the transform,
// then compute the perspective matrix with cvGetPerspectiveTransform.
// (The original comment incorrectly attributed this step to cvWarpPerspective.)
CvPoint2D32f[] srcPnt = new CvPoint2D32f[4];
CvPoint2D32f[] dstPnt = new CvPoint2D32f[4];
srcPnt[0] = new CvPoint2D32f(150.0f, 150.0f);
srcPnt[1] = new CvPoint2D32f(150.0f, 300.0f);
srcPnt[2] = new CvPoint2D32f(350.0f, 300.0f);
srcPnt[3] = new CvPoint2D32f(350.0f, 150.0f);
dstPnt[0] = new CvPoint2D32f(200.0f, 200.0f);
dstPnt[1] = new CvPoint2D32f(150.0f, 300.0f);
dstPnt[2] = new CvPoint2D32f(350.0f, 300.0f);
dstPnt[3] = new CvPoint2D32f(300.0f, 200.0f);
using (CvMat mapMatrix = Cv.GetPerspectiveTransform(srcPnt, dstPnt))
{
// (3) Warp the image with cvWarpPerspective using the computed matrix.
// (The original comment incorrectly said cvWarpAffine.)
Cv.WarpPerspective(srcImg, dstImg, mapMatrix, Interpolation.Linear | Interpolation.FillOutliers, CvScalar.ScalarAll(100));
// (4) Show the result.
using (new CvWindow("src", srcImg))
using (new CvWindow("dst", dstImg))
{
Cv.WaitKey(0);
}
}
}
}
示例14: Affine
// cvGetAffineTransform + cvWarpAffine: three point pairs define the
// affine mapping, which is then applied to the whole image.
public Affine()
{
    using (IplImage original = new IplImage(FilePath.Image.Goryokaku, LoadMode.AnyDepth | LoadMode.AnyColor))
    using (IplImage transformed = original.Clone())
    {
        // Triangle vertices before and after the transform.
        var before = new CvPoint2D32f[3];
        var after = new CvPoint2D32f[3];
        before[0] = new CvPoint2D32f(200.0f, 200.0f);
        before[1] = new CvPoint2D32f(250.0f, 200.0f);
        before[2] = new CvPoint2D32f(200.0f, 100.0f);
        after[0] = new CvPoint2D32f(300.0f, 100.0f);
        after[1] = new CvPoint2D32f(300.0f, 50.0f);
        after[2] = new CvPoint2D32f(200.0f, 100.0f);
        using (CvMat affineMatrix = Cv.GetAffineTransform(before, after))
        {
            // Pixels mapped from outside the source are filled with value 0.
            Cv.WarpAffine(original, transformed, affineMatrix, Interpolation.Linear | Interpolation.FillOutliers, CvScalar.ScalarAll(0));
            using (new CvWindow("src", original))
            using (new CvWindow("dst", transformed))
            {
                Cv.WaitKey(0);
            }
        }
    }
}
示例15: FaceDetect
// Detects faces in `src` with a Haar cascade (cvHaarDetectObjects) and
// covers each detected face with a filled black circle.
// Side effect: assigns the masked image clone to the `FindFace` field.
// Returns the masked image converted to a 24bpp RGB Bitmap.
// NOTE(review): assumes `src` is a 3-channel BGR image (BgrToGray) — confirm.
public System.Drawing.Bitmap FaceDetect(IplImage src)
{
    // CvHaarClassifierCascade, cvHaarDetectObjects
    const double scale = 1.04;        // detection runs on an image shrunk by this factor
    const double scaleFactor = 1.139; // cascade window growth factor per pass
    const int minNeighbors = 1;       // minimum neighboring detections to accept
    using (IplImage img = src.Clone())
    using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / scale), Cv.Round(img.Height / scale)), BitDepth.U8, 1))
    {
        // Build the equalized grayscale image used for detection.
        using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
        {
            Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
            Cv.Resize(gray, smallImg, Interpolation.Linear);
            Cv.EqualizeHist(smallImg, smallImg);
        }
        // Fix: build the cascade path with Path.Combine instead of manual
        // "\\" concatenation (robust to trailing separators).
        string cascadePath = System.IO.Path.Combine(Environment.CurrentDirectory, "haarcascade_frontalface_alt.xml");
        using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile(cascadePath))
        using (CvMemStorage storage = new CvMemStorage())
        {
            // Detect faces on the downscaled image.
            // (Removed a redundant storage.Clear() on the freshly created storage,
            // and an unused `colors` array that was never referenced.)
            CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, scaleFactor, minNeighbors, 0, new CvSize(20, 20));
            // Cover each detected face with a filled black circle, mapping
            // coordinates back up to the original resolution via `scale`.
            for (int i = 0; i < faces.Total; i++)
            {
                CvRect r = faces[i].Value.Rect;
                CvPoint center = new CvPoint
                {
                    X = Cv.Round((r.X + r.Width * 0.5) * scale),
                    Y = Cv.Round((r.Y + r.Height * 0.5) * scale)
                };
                int radius = Cv.Round((r.Width + r.Height) * 0.25 * scale);
                img.Circle(center, radius, new CvColor(0, 0, 0), -1, LineType.Link8, 0);
            }
        }
        // NOTE(review): any previous image held by FindFace is not disposed
        // here — confirm whether the field owner releases it.
        FindFace = img.Clone();
        // Convert the masked IplImage to a Bitmap and return it.
        return FindFace.ToBitmap(System.Drawing.Imaging.PixelFormat.Format24bppRgb);
    }
}