本文整理汇总了C#中Mat.Clone方法的典型用法代码示例。如果您正苦于以下问题:C# Mat.Clone方法的具体用法?C# Mat.Clone怎么用?C# Mat.Clone使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Mat的用法示例。
在下文中一共展示了Mat.Clone方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: FilterTiles
public void FilterTiles(Mat image, Mat modifiedMat)
{
CvInvoke.Imshow("0", image);
Stopwatch sw1 = new Stopwatch();
sw1.Start();
Mat laplaced = new Mat();
CvInvoke.CvtColor(image, laplaced, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);
Mat greyResult = laplaced.Clone();
Mat greySource = laplaced.Clone();
Mat cannySrc = new Mat();
//if not half inch, do canny and subtract to separate tiles better. Basically "sharpens" the edge
if (scan.TileSettings.CannyEdges)
{
//create canny image, these parameters could be adjusted probably?
CvInvoke.Canny(greySource, greyResult, 50, 150);
//dilate canny
CvInvoke.Dilate(greyResult, greyResult, null, new System.Drawing.Point(1, 1), scan.TileSettings.CannyDilate, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
CvInvoke.Erode(greyResult, greyResult, null, new System.Drawing.Point(1, 1), scan.TileSettings.CannyDilate, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
CvInvoke.Imshow("1a", greyResult);
//subtract dilated canny from source to get separation
CvInvoke.Subtract(greySource, greyResult, greyResult);
greySource = greyResult.Clone();
CvInvoke.Imshow("1b", greyResult);
}
if (scan.TileSettings.ThresholdEdges)
{
Mat edges = new Mat();
CvInvoke.Threshold(greyResult, edges, (float)thresholdTrackbar.Value, 0, ThresholdType.ToZero);
CvInvoke.Subtract(greySource, edges, greyResult);
CvInvoke.Erode(greyResult, greyResult, null, new System.Drawing.Point(1, 1), 2, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
CvInvoke.Imshow("pres-1c", greyResult);
}
//perform distance transform
CvInvoke.DistanceTransform(greyResult, greyResult, null, DistType.L2, 5);
//normalize the image to bring out the peaks
CvInvoke.Normalize(greyResult, greyResult, 0, 1, NormType.MinMax);
CvInvoke.Imshow("2", greyResult);
//threshold the image, different thresholds for different tiles
CvInvoke.Threshold(greyResult, greyResult, scan.TileSettings.ThresholdVal, 1, ThresholdType.Binary);
CvInvoke.Imshow("3", greyResult);
//erode to split the blobs
CvInvoke.Erode(greyResult, greyResult, null, new System.Drawing.Point(-1, -1), scan.TileSettings.ThresholdErode, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
//convert to 8 bit unsigned needed for canny
greyResult.ConvertTo(greyResult, DepthType.Cv8U);
VectorOfVectorOfPoint markers = new VectorOfVectorOfPoint();
//create 32bit, single channel image for result of markers
Mat markerImage = new Mat(greyResult.Size, DepthType.Cv32S, 1);
//set image to 0
markerImage.SetTo(new MCvScalar(0, 0, 0));
//find the contours
CvInvoke.FindContours(greyResult, markers, null, RetrType.External, ChainApproxMethod.LinkRuns);
//label the markers from 1 -> n, the rest of the image should remain 0
for (int i = 0; i < markers.Size; i++)
CvInvoke.DrawContours(markerImage, markers, i, new MCvScalar(i + 1, i + 1, i + 1), -1);
ScalarArray mult = new ScalarArray(5000);
Mat markerVisual = new Mat();
CvInvoke.Multiply(markerImage, mult, markerVisual);
CvInvoke.Imshow("4", markerVisual);
//draw the background marker
CvInvoke.Circle(markerImage,
new System.Drawing.Point(5, 5),
3,
new MCvScalar(255, 255, 255),
-1);
//convert to 3 channel
Mat convertedOriginal = new Mat();
//use canny modified if 3/4", or use the gray image for others
CvInvoke.CvtColor(greySource, convertedOriginal, ColorConversion.Gray2Bgr);
//watershed!!
CvInvoke.Watershed(convertedOriginal, markerImage);
//visualize
CvInvoke.Multiply(markerImage, mult, markerVisual);
CvInvoke.Imshow("5", markerVisual);
//get contours to get the actual tiles now that they are separate...
//.........这里部分代码省略.........
示例2: DetectFace
/// <summary>
/// Loads the sample image, runs the cascade face detector on a grayscale
/// copy, and returns a color clone with each detected face marked by a
/// magenta ellipse.
/// </summary>
/// <param name="cascade">Trained Haar cascade used for detection.</param>
/// <returns>Color clone of the source image with detections drawn on it.</returns>
private Mat DetectFace(CascadeClassifier cascade)
{
    Mat result;
    using (var src = new Mat(FilePath.Image.Yalta, LoadMode.Color))
    using (var gray = new Mat())
    {
        result = src.Clone();
        Cv2.CvtColor(src, gray, ColorConversion.BgrToGray, 0);

        // Detect faces (scale step 1.08, min 2 neighbors, min size 30x30)
        Rect[] faces = cascade.DetectMultiScale(
            gray, 1.08, 2, HaarDetectionType.ScaleImage, new Size(30, 30));

        // Mark each detection with an ellipse centered on the face rectangle
        foreach (Rect face in faces)
        {
            var center = new Point(
                (int)(face.X + face.Width * 0.5),
                (int)(face.Y + face.Height * 0.5));
            var axes = new Size(
                (int)(face.Width * 0.5),
                (int)(face.Height * 0.5));
            Cv2.Ellipse(result, center, axes, 0, 0, 360, new Scalar(255, 0, 255), 4);
        }
    }
    return result;
}
示例3: SelectStitchingImages
/// <summary>
/// Crops <paramref name="count"/> random width x height patches out of the
/// penguin sample image (deep-cloned so they outlive the source), outlines
/// each chosen region in red on a preview clone, shows the preview, and
/// returns the patches.
/// </summary>
private static Mat[] SelectStitchingImages(int width, int height, int count)
{
    Mat source = new Mat(@"img\Penguins.jpg", LoadMode.Color);
    Mat result = source.Clone();
    var rand = new Random();
    var mats = new List<Mat>();

    for (int i = 0; i < count; i++)
    {
        int left = rand.Next(source.Cols - width);
        int top = rand.Next(source.Rows - height);
        int right = left + width;
        int bottom = top + height;

        // Outline the chosen rectangle in red (BGR 0,0,255) edge by edge
        var red = new Scalar(0, 0, 255);
        var corners = new[]
        {
            new Point(left, top),
            new Point(left, bottom),
            new Point(right, bottom),
            new Point(right, top),
        };
        for (int c = 0; c < corners.Length; c++)
        {
            result.Line(corners[c], corners[(c + 1) % corners.Length], red);
        }

        // Clone: the indexer yields a view into `source`, not a copy
        Mat m = source[new Rect(left, top, width, height)];
        mats.Add(m.Clone());
    }

    using (new Window(result))
    {
        Cv.WaitKey();
    }
    return mats.ToArray();
}
示例4: LineSegmentDetectorSample
/// <summary>
/// Detects line segments in the shapes sample with OpenCV's
/// LineSegmentDetector and shows them drawn over a clone of the input.
/// </summary>
private static void LineSegmentDetectorSample()
{
    // Mats and the detector wrap native memory (IDisposable); the original
    // never disposed any of them. `using` chains guarantee release even if
    // Detect/DrawSegments throws.
    using (var img = new Mat("data/shapes.png", ImreadModes.GrayScale))
    using (var lines = new Mat())
    using (var view = img.Clone())
    using (var detector = LineSegmentDetector.Create())
    {
        detector.Detect(img, lines);
        detector.DrawSegments(view, lines);
        Window.ShowImages(view);
    }
}
示例5: Run
/// <summary>
/// Loads the distortion sample, runs the C++-style MSER detection on a
/// grayscale copy (drawing onto a color clone), and displays source,
/// grayscale, and result side by side until a key is pressed.
/// </summary>
public void Run()
{
    using (Mat source = new Mat(FilePath.Image.Distortion, LoadMode.Color))
    using (Mat grayscale = new Mat())
    using (Mat canvas = source.Clone())
    {
        Cv2.CvtColor(source, grayscale, ColorConversion.BgrToGray);

        // C++ style MSER: detects on grayscale, draws onto the color clone
        CppStyleMSER(grayscale, canvas);

        using (new Window("MSER src", source))
        using (new Window("MSER gray", grayscale))
        using (new Window("MSER dst", canvas))
        {
            Cv.WaitKey();
        }
    }
}
示例6: MatchTemplateSample
/// <summary>
/// Runs masked normalized cross-correlation template matching, draws a red
/// rectangle at the best-match location on a clone of the source, prints the
/// peak score, and shows the result.
/// </summary>
private static void MatchTemplateSample()
{
    // All Mats wrap native memory; the original leaked every one of them.
    using (var src = new Mat(@"data\mt_src.png"))
    using (var template = new Mat(@"data\template.png"))
    using (var mask = new Mat(@"data\mask.png"))
    using (var result = new Mat())
    {
        Cv2.MatchTemplate(src, template, result, TemplateMatchModes.CCorrNormed, mask);

        // For CCorrNormed the best match is the maximum of the result map
        double minVal, maxVal;
        Point minLoc, maxLoc;
        Cv2.MinMaxLoc(result, out minVal, out maxVal, out minLoc, out maxLoc);

        using (var view = src.Clone())
        {
            view.Rectangle(maxLoc, new Point(maxLoc.X + template.Width, maxLoc.Y + template.Height), Scalar.Red, 2);
            Console.WriteLine(maxVal);
            Window.ShowImages(view);
        }
    }
}
示例7: SampleCpp
/// <summary>
/// sample of new C++ style wrapper: detects straight lines in the Goryokaku
/// sample image with both the standard and the probabilistic Hough transform
/// and shows the two results in separate windows.
/// </summary>
private void SampleCpp()
{
    // (1) Load the image (grayscale copy for detection, color + clone for drawing)
    using (Mat imgGray = new Mat(Const.ImageGoryokaku, LoadMode.GrayScale))
    using (Mat imgStd = new Mat(Const.ImageGoryokaku, LoadMode.Color))
    using (Mat imgProb = imgStd.Clone())
    {
        // Preprocessing for the Hough transform: edge map via Canny
        CvCpp.Canny(imgGray, imgGray, 50, 200, ApertureSize.Size3, false);
        // (3) Detect lines with the standard Hough transform and draw them
        CvLineSegmentPolar[] segStd = CvCpp.HoughLines(imgGray, 1, Math.PI / 180, 50, 0, 0);
        int limit = Math.Min(segStd.Length, 10);
        for (int i = 0; i < limit; i++ )
        {
            // Convert the polar line (rho, theta) into two Cartesian endpoints
            // 1000 px either side of its foot point (x0, y0), far enough to
            // span the whole image.
            float rho = segStd[i].Rho;
            float theta = segStd[i].Theta;
            double a = Math.Cos(theta);
            double b = Math.Sin(theta);
            double x0 = a * rho;
            double y0 = b * rho;
            CvPoint pt1 = new CvPoint { X = Cv.Round(x0 + 1000 * (-b)), Y = Cv.Round(y0 + 1000 * (a)) };
            CvPoint pt2 = new CvPoint { X = Cv.Round(x0 - 1000 * (-b)), Y = Cv.Round(y0 - 1000 * (a)) };
            imgStd.Line(pt1, pt2, CvColor.Red, 3, LineType.AntiAlias, 0);
        }
        // (4) Detect line segments with the probabilistic Hough transform and draw them
        CvLineSegmentPoint[] segProb = CvCpp.HoughLinesP(imgGray, 1, Math.PI / 180, 50, 50, 10);
        foreach (CvLineSegmentPoint s in segProb)
        {
            imgProb.Line(s.P1, s.P2, CvColor.Red, 3, LineType.AntiAlias, 0);
        }
        // (5) Create windows for the detection results and display them
        using (new CvWindow("Hough_line_standard", WindowMode.AutoSize, imgStd.ToIplImage()))
        using (new CvWindow("Hough_line_probabilistic", WindowMode.AutoSize, imgProb.ToIplImage()))
        {
            CvWindow.WaitKey(0);
        }
    }
}
示例8: SampleCpp
/// <summary>
/// sample of new C++ style wrapper: detects straight lines in the Goryokaku
/// sample image with both the standard and the probabilistic Hough transform
/// and shows the two results in separate windows.
/// </summary>
private void SampleCpp()
{
    // (1) Load the image (grayscale copy for detection, color + clone for drawing)
    using (Mat imgGray = new Mat(FilePath.Image.Goryokaku, LoadMode.GrayScale))
    using (Mat imgStd = new Mat(FilePath.Image.Goryokaku, LoadMode.Color))
    using (Mat imgProb = imgStd.Clone())
    {
        // Preprocess: edge map via Canny
        Cv2.Canny(imgGray, imgGray, 50, 200, 3, false);

        // (3) Run Standard Hough Transform
        CvLineSegmentPolar[] segStd = Cv2.HoughLines(imgGray, 1, Math.PI / 180, 50, 0, 0);
        int limit = Math.Min(segStd.Length, 10);
        for (int i = 0; i < limit; i++ )
        {
            // Draws result lines: convert polar (rho, theta) into two
            // Cartesian endpoints 1000 px either side of the foot point.
            float rho = segStd[i].Rho;
            float theta = segStd[i].Theta;
            double a = Math.Cos(theta);
            double b = Math.Sin(theta);
            double x0 = a * rho;
            double y0 = b * rho;
            Point pt1 = new Point { X = Cv.Round(x0 + 1000 * (-b)), Y = Cv.Round(y0 + 1000 * (a)) };
            Point pt2 = new Point { X = Cv.Round(x0 - 1000 * (-b)), Y = Cv.Round(y0 - 1000 * (a)) };
            imgStd.Line(pt1, pt2, Scalar.Red, 3, LineType.AntiAlias, 0);
        }

        // (4) Run Probabilistic Hough Transform
        CvLineSegmentPoint[] segProb = Cv2.HoughLinesP(imgGray, 1, Math.PI / 180, 50, 50, 10);
        foreach (CvLineSegmentPoint s in segProb)
        {
            // Scalar.Red matches the new-style API used in the standard-
            // transform loop above (was old-style CvColor.Red).
            imgProb.Line(s.P1, s.P2, Scalar.Red, 3, LineType.AntiAlias, 0);
        }

        // (5) Show results
        using (new Window("Hough_line_standard", WindowMode.AutoSize, imgStd))
        using (new Window("Hough_line_probabilistic", WindowMode.AutoSize, imgProb))
        {
            // Window.WaitKey matches the new-style Window objects above
            // (was old-style CvWindow.WaitKey).
            Window.WaitKey(0);
        }
    }
}
示例9: Run
/// <summary>
/// Detects FAST keypoints (threshold 50, non-max suppression on) in the
/// Lenna sample and shows them as filled red circles on a color clone.
/// </summary>
public void Run()
{
    using (Mat source = new Mat(FilePath.Lenna, LoadMode.Color))
    using (Mat grayscale = new Mat())
    using (Mat canvas = source.Clone())
    {
        Cv2.CvtColor(source, grayscale, ColorConversion.BgrToGray, 0);

        KeyPoint[] keypoints;
        Cv2.FAST(grayscale, out keypoints, 50, true);

        // Filled (thickness -1) anti-aliased circle per keypoint
        for (int i = 0; i < keypoints.Length; i++)
        {
            canvas.Circle(keypoints[i].Pt, 3, CvColor.Red, -1, LineType.AntiAlias, 0);
        }

        Cv2.ImShow("FAST", canvas);
        Cv2.WaitKey(0);
        Cv2.DestroyAllWindows();
    }
}
示例10: FAST
/// <summary>
/// Demonstrates FAST keypoint detection (threshold 50, non-max suppression)
/// on the Lenna sample, drawing the keypoints as filled red circles.
/// </summary>
public FAST()
{
    // The IplImage was never disposed in the original (native leak). It is
    // outermost in the using chain so it is released last — after the Mat
    // that wraps it, which may share its pixel data.
    using (IplImage img = new IplImage(Const.ImageLenna, LoadMode.Color))
    using (Mat imgSrc = new Mat(img))
    //using (Mat imgSrc = new Mat(Const.ImageLenna, LoadMode.Color))
    using (Mat imgGray = new Mat(imgSrc.Size, MatrixType.U8C1))
    using (Mat imgDst = imgSrc.Clone())
    {
        CvCpp.CvtColor(imgSrc, imgGray, ColorConversion.BgrToGray, 0);

        KeyPoint[] keypoints;
        CvCpp.FAST(imgGray, out keypoints, 50, true);

        // Filled (thickness -1) anti-aliased circle per keypoint
        foreach (KeyPoint kp in keypoints)
        {
            imgDst.Circle(kp.Pt, 3, CvColor.Red, -1, LineType.AntiAlias, 0);
        }

        CvCpp.ImShow("FAST", imgDst);
        CvCpp.WaitKey(0);
        Cv.DestroyAllWindows();
    }
}
示例11: SumMat
// Composite two Mats pixel by pixel into a new Mat.
// Mode Is == true  : per pixel, if both inputs have a zero first channel the
//                    output pixel is zeroed; otherwise the pixel is copied
//                    from srcMat_0 if its first channel is nonzero, else from
//                    srcMat_1 if its first channel is nonzero, else the clone
//                    of srcMat_0 is left as-is.
// Mode Is == false : binary AND-threshold — the output pixel is 255 on every
//                    channel only when BOTH inputs' first channels exceed
//                    200, otherwise 0.
// NOTE(review): decisions are based on channel 0 only; assumes both inputs
// are 8-bit, contiguous, with this.imgChannles channels and dimensions
// matching this.imageHeight x this.imageWidth — TODO confirm at call sites.
public Mat SumMat(Mat srcMat_0, Mat srcMat_1,bool Is)
{
// Start from a deep copy of srcMat_0 so untouched pixels keep its values.
Mat dstMat = srcMat_0.Clone();
unsafe
{
byte* srcPtr_0 = srcMat_0.DataPointer;
byte* srcPtr_1 = srcMat_1.DataPointer;
byte* dstPtr = dstMat.DataPointer;
// i indexes pixels; byte offset is i * imgChannles (interleaved channels).
for (int i = 0; i < this.imageHeight * this.imageWidth; i++)
{
if (Is)
{
// Both first channels zero -> force the whole output pixel to 0.
if ((*(srcPtr_0 + i * this.imgChannles) == 0) && (*(srcPtr_1 + i * this.imgChannles) == 0))
{
for (int j = 0; j < this.imgChannles; j++)
{
*(dstPtr + i * this.imgChannles + j) = 0;
}
}
// srcMat_0 wins when its first channel is nonzero.
else if (*(srcPtr_0 + i * this.imgChannles) != 0)
{
for (int j = 0; j < this.imgChannles; j++)
{
*(dstPtr + i * this.imgChannles + j) = *(srcPtr_0 + i * this.imgChannles + j);
}
}
// Otherwise fall back to srcMat_1 when its first channel is nonzero.
else if (*(srcPtr_1 + i * this.imgChannles) != 0)
{
for (int j = 0; j < this.imgChannles; j++)
{
*(dstPtr + i * this.imgChannles + j) = *(srcPtr_1 + i * this.imgChannles + j);
}
}
}
else
{
// AND-threshold: white only where both first channels exceed 200.
if ((*(srcPtr_0 + i * this.imgChannles) > 200) && (*(srcPtr_1 + i * this.imgChannles) > 200))
{
for (int j = 0; j < this.imgChannles; j++)
{
*(dstPtr + i * this.imgChannles + j) = 255;
}
}
else
{
for (int j = 0; j < this.imgChannles; j++)
{
*(dstPtr + i * this.imgChannles + j) = 0;
}
}
}
}
}
return dstMat;
}
示例12: PerspectiveProject
// Perspective transform: warps srcImg by the homography mapping src_Pt to
// dst_Pt (cubic interpolation, blue fill for out-of-source pixels) and
// returns the result; srcImg is not modified.
public Mat PerspectiveProject(Mat srcImg, CvPoint2D32f[] src_Pt, CvPoint2D32f[] dst_Pt)
{
    // Clone directly; the original first allocated `new Mat()` and then
    // immediately overwrote the reference with Clone(), leaking the native
    // allocation.
    Mat dstImg = srcImg.Clone();
    CvMat perspective_matrix = Cv.GetPerspectiveTransform(src_Pt, dst_Pt);
    Cv.WarpPerspective(srcImg.ToCvMat(), dstImg.ToCvMat(), perspective_matrix, Interpolation.Cubic, new CvScalar(255, 0, 0));
    return dstImg;
}
示例13: Update
private void Update(ref Mat src, ref Mat dst)
{
try
{
Cv2.CvtColor(src, bufimage, OpenCvSharp.ColorConversion.BgrToGray);
unsafe
{
byte* pPixel = bufimage.DataPointer;
Random r = new Random();
for (int y = 0; y < _h; y++)
{
for (int x = 0; x < _w; x++)
{
//黒くない点は生きてる
if (*pPixel > 100)
{
m_Field[y][x] = true;
if (r.Next(0, 100) < 60)
{ //ofRandom(0,100) < 80だった
//m_Field[y][x] = true;
}
}
else
{
m_Field[y][x] = false;
}
*pPixel = m_Field[y][x] ? (byte)255 : (byte)0;
pPixel++;
}
}
}
/////
// //////
Cv2.Dilate(bufimage, bufimage, m_element);
Cv2.Dilate(bufimage, bufimage, m_element);
Cv2.Erode(bufimage, bufimage, m_element);
Cv2.Erode(bufimage, bufimage, m_element);
//*********************************************************************************************
//ここからメイン
//*********************************************************************************************
////***********************************************************
////cvFindContoursを用いた輪郭抽出*****************************
Mat tmp_bufImage_next;
Mat tmp_bufImage_next3;
//TODO:移動可
tmp_bufImage_next = new Mat(new Size(_w, _h), MatType.CV_8UC1, new Scalar(0));
tmp_bufImage_next3 = new Mat(new Size(_w, _h), MatType.CV_8UC1, new Scalar(0));
bufimage.CopyTo(tmp_bufImage_next);
Point[][] contours;
HierarchyIndex[] hierarchy;
/// Find contours
Cv2.FindContours(tmp_bufImage_next, out contours, out hierarchy, OpenCvSharp.ContourRetrieval.Tree, OpenCvSharp.ContourChain.ApproxNone);
/// Draw contours
for (int i = 0; i < contours.Length; i++)
{
Scalar color = new Scalar(255);
//Cv2.DrawContours(tmp_bufImage_next3, contours, i, color, 2, OpenCvSharp.LineType.Link8, hierarchy, 0);
Cv2.FillPoly(tmp_bufImage_next3, contours, color);
}
//cvClearSeq(contours); //これはいらないみたい
////***********************************************************
////残像処理***************************************************
innerGrayBuffer2 -= 0.2; //param.slider[0];
outerGrayBuffer2 -= 0.2; //param.slider[1];
outerGrayBuffer2 += tmp_bufImage_next3;
innerGrayBuffer2 += tmp_bufImage_next3.Clone() - 230.0;
for (int i = 0; i < 3; i++)
{ //(int)param.slider[2]
Cv2.Erode(innerGrayBuffer2, innerGrayBuffer2, m_element);
}
//.........这里部分代码省略.........
示例14: Surf
/*
private static void Surf()
{
Mat src = new Mat("data/match1.png");
Mat src2 = new Mat("data/match2.png");
//Detect the keypoints and generate their descriptors using SURF
SURF surf = SURF.Create(500, 4, 2, true);
KeyPoint[] keypoints1, keypoints2;
MatOfFloat descriptors1 = new MatOfFloat();
MatOfFloat descriptors2 = new MatOfFloat();
surf.Compute(src, null, out keypoints1, descriptors1);
surf.Compute(src2, null, out keypoints2, descriptors2);
// Matching descriptor vectors with a brute force matcher
BFMatcher matcher = new BFMatcher(NormType.L2, false);
DMatch[] matches = matcher.Match(descriptors1, descriptors2);//例外が発生する箇所
Mat view = new Mat();
Cv2.DrawMatches(src, keypoints1, src2, keypoints2, matches, view);
Window.ShowImages(view);
}*/
/// <summary>
/// Crops <paramref name="count"/> random width x height patches out of the
/// penguin image (deep-cloned so they outlive the source), outlines each
/// region in red on a preview clone, saves and shows the preview, and
/// returns the patches.
/// </summary>
private static Mat[] StitchingPreprocess(int width, int height, int count)
{
    // source/result and every ROI header wrap native memory; the original
    // never disposed any of them.
    using (Mat source = new Mat(@"C:\Penguins.jpg", ImreadModes.Color))
    using (Mat result = source.Clone())
    {
        var rand = new Random();
        var mats = new List<Mat>();
        for (int i = 0; i < count; i++)
        {
            int x1 = rand.Next(source.Cols - width);
            int y1 = rand.Next(source.Rows - height);
            int x2 = x1 + width;
            int y2 = y1 + height;

            // Outline the chosen rectangle in red (BGR 0,0,255)
            result.Line(new Point(x1, y1), new Point(x1, y2), new Scalar(0, 0, 255));
            result.Line(new Point(x1, y2), new Point(x2, y2), new Scalar(0, 0, 255));
            result.Line(new Point(x2, y2), new Point(x2, y1), new Scalar(0, 0, 255));
            result.Line(new Point(x2, y1), new Point(x1, y1), new Scalar(0, 0, 255));

            // The indexer yields a view into `source`; clone for the output
            // and dispose the temporary ROI header.
            using (Mat m = source[new Rect(x1, y1, width, height)])
            {
                mats.Add(m.Clone());
                //string outFile = String.Format(@"C:\temp\stitching\{0:D3}.png", i);
                //m.SaveImage(outFile);
            }
        }
        result.SaveImage(@"C:\temp\parts.png");
        using (new Window(result))
        {
            Cv2.WaitKey();
        }
        return mats.ToArray();
    }
}
示例15: InitImage
// Reinitializes the projection image from `mat`: only the first registered
// sender may trigger this. The incoming Mat is cloned (so the caller keeps
// ownership of `mat`) and the clone is filled with `black`.
public void InitImage(Transporter Sender, Mat mat)
{
    // Guard: sender list must exist and Sender must be its first entry.
    var senders = this.m_SenderList;
    if (senders == null || Sender != senders[0])
    {
        return;
    }

    // Clone to match size/type, then blank the whole surface.
    this.m_ProjectionImageMatrix = mat.Clone();
    this.m_ProjectionImageMatrix.SetTo(black);
}