This article collects typical usage examples of the Mat.ToCvMat method in C#. If you are wondering what Mat.ToCvMat does in C#, how to use it, or where to find examples, the curated code samples here may help. You can also explore further usage examples of the Mat class, to which this method belongs.
Five code examples of the Mat.ToCvMat method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code examples.
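Before the examples, here is a minimal sketch of the conversion itself (a sketch only: the image path is a placeholder, and OpenCvSharp 2.x with the OpenCvSharp.CPlusPlus Mat type is assumed). ToCvMat exposes a Mat as a C-API CvMat header so it can be handed to legacy Cv.* functions; in OpenCV this conversion normally shares the pixel data rather than copying it.
using System;
using OpenCvSharp;
using OpenCvSharp.CPlusPlus;

class ToCvMatSketch
{
    static void Main()
    {
        // "lenna.png" is a placeholder path.
        using (Mat mat = new Mat("lenna.png", LoadMode.Color))
        {
            // Wrap the Mat data in a C-API CvMat header.
            CvMat m = mat.ToCvMat();
            Console.WriteLine("{0} x {1}", m.Rows, m.Cols);
        }
    }
}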
Example 1: CppTest
public CppTest()
{
    //PixelAccess();
    using (Mat mat = new Mat(Const.ImageLenna, LoadMode.Color))
    {
        //CvSize s;
        //CvPoint p;
        //mat.LocateROI(out s, out p);

        // Convert to CvMat
        CvMat m = mat.ToCvMat();
        Console.WriteLine(m);

        // Slice out a range of rows
        Mat row = mat.RowRange(100, 200);

        // Convert to IplImage and display via highgui
        IplImage img = row.ToIplImage();
        using (new CvWindow("highgui", img))
        {
            Cv.WaitKey();
        }

        // Convert to Bitmap and display in a Windows Form
        using (Bitmap bitmap = mat.ToBitmap())
        using (Form form = new Form() { Text = "WindowsForms", ClientSize = new System.Drawing.Size(bitmap.Width, bitmap.Height) })
        using (PictureBox pb = new PictureBox() { Image = bitmap, Dock = DockStyle.Fill })
        {
            form.Controls.Add(pb);
            Application.Run(form);
        }

        // Display via cv::imshow
        CvCpp.NamedWindow("imshow", WindowMode.AutoSize);
        CvCpp.ImShow("imshow", mat);
        CvCpp.WaitKey(0);
        Cv.DestroyAllWindows();
    }
}
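A small follow-up sketch on the RowRange call above (same assumed setup; a Mat.SetTo overload taking a Scalar is also assumed to be available in this OpenCvSharp version): RowRange returns a header over the original pixels rather than a copy, so painting the sub-mat also changes the source image.
using (Mat mat = new Mat("lenna.png", LoadMode.Color))
{
    Mat row = mat.RowRange(100, 200);
    row.SetTo(new Scalar(0, 0, 255));   // paint the band red (BGR order)
    Cv2.ImShow("shared data", mat);     // the red band is visible in the full image
    Cv2.WaitKey();
}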
Example 2: DoTracking
// FaceTracking
void DoTracking()
{
    //while (running)
    //{
    try
    {
        if (kinect.GetDepthRaw())
        {
            //lock (this)
            //{
            src = DoDepthBuffer(kinect.usersDepthMap, KinectWrapper.GetDepthWidth(), KinectWrapper.GetDepthHeight());
            roi = src.Clone(new OpenCvSharp.CPlusPlus.Rect(roiX, roiY, roiW, roiH));
            roi.ConvertTo(roi, OpenCvSharp.CPlusPlus.MatType.CV_8U, 255.0 / 32000.0);
            Cv2.Subtract(new Mat(roiH, roiW, MatType.CV_8UC1, new Scalar(255)), roi, roi);
            double threshMax = 255.0 - ((255.0 / 32000.0) * ((ushort)srcThreshMax << 3));
            double threshMin = 255.0 - ((255.0 / 32000.0) * ((ushort)srcThreshMin << 3));
            roi = roi.Threshold(threshMin, 255.0, ThresholdType.ToZeroInv);
            roi = roi.Threshold(threshMax, 255.0, ThresholdType.ToZero);

            // Flip up/down dimension and right/left dimension
            if (!FlipUpDownAxis && FlipLeftRightAxis)
                roi.Flip(FlipMode.XY);
            else if (!FlipUpDownAxis)
                roi.Flip(FlipMode.X);
            else if (FlipLeftRightAxis)
                roi.Flip(FlipMode.Y);

            // Apply elliptical mask
            Mat ellipseMask = new Mat(roi.Rows, roi.Cols, OpenCvSharp.CPlusPlus.MatType.CV_8U, new Scalar(0.0));
            Cv2.Ellipse(ellipseMask, new Point(ellipseMaskCenterX, ellipseMaskCenterY), new Size(axisMaskX, axisMaskY), maskAngle, maskStartAngle, maskEndAngle, new Scalar(255.0), -1);
            Cv2.BitwiseAnd(roi, ellipseMask, roi);

            // Remove noise by morphologyEx
            Mat kernel = Cv2.GetStructuringElement(StructuringElementShape.Ellipse, new Size(3, 3));
            Cv2.MorphologyEx(roi, roi, MorphologyOperation.Open, kernel);
            Cv2.MorphologyEx(roi, roi, MorphologyOperation.Close, kernel);

            // Subtract background
            if (first)
            {
                bg = roi.Clone();
                //bg = bg.Blur(new Size(smoothBlur, smoothBlur));
                first = false;
            }
            fg = bg.Clone();
            //roi = roi.Blur(new Size(smoothBlur, smoothBlur));
            Mat subMask = roi.Clone();
            subMask = subMask.Threshold(smThresh, 255.0, ThresholdType.ToZero);
            //Cv2.ImShow("sm",subMask);
            bg.CopyTo(roi, subMask);
            OpenCvSharp.Cv.AbsDiff(roi.ToCvMat(), bg.ToCvMat(), fg.ToCvMat());

            // Threshold foreground image
            fgthresh = fg.Threshold(threshold, 255.0, ThresholdType.Binary);
            fgthresh = fgthresh.Blur(new Size(smoothBlur, smoothBlur));

            // Detect blobs (note: the Mat constructor takes (rows, cols, type),
            // so passing roi.Cols first only matches when the ROI is square)
            Mat roiToImg = new Mat(roi.Cols, roi.Rows, MatType.CV_8UC3);
            Mat threshToImg = fgthresh.Clone();
            Cv2.Merge(new Mat[] { roi, roi, roi }, roiToImg);
            IplImage showImg = roiToImg.ToIplImage();
            IplImage fgthreshImg = threshToImg.ToIplImage();
            OpenCvSharp.Blob.CvBlobLib.Label(fgthreshImg, blobs);
            OpenCvSharp.Blob.CvBlobLib.FilterByArea(blobs, blobMinArea, blobMaxArea);
            OpenCvSharp.Blob.CvBlobLib.RenderBlobs(blobs, fgthreshImg, showImg, RenderBlobsMode.Color | RenderBlobsMode.Centroid);
            UpdateTracks(blobs, tracks, blobMinDistance, blobMaxLife);
            //OpenCvSharp.Blob.CvBlobLib.RenderTracks(tracks, fgthreshImg, showImg, RenderTracksMode.BoundingBox | RenderTracksMode.Id);
            RenderTracks(showImg);
            //Cv.ShowImage("thres", fgthreshImg);
            Cv.ShowImage("showBlob", showImg);
            //Check blob actions
            //Debug.Log(tracks.Count);
            //}
        }
    }
    catch (System.Exception e)
    {
        //throw e;
        Debug.Log(e.Message + " " + e.StackTrace);
    }
    //}
}
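The ToCvMat calls above exist only to feed the legacy C-API function Cv.AbsDiff. The same difference-and-threshold step can be sketched on Mat directly with Cv2.Absdiff (a sketch only: the file names and the threshold value of 40 below are made up for illustration).
// Hypothetical standalone version of the AbsDiff + Threshold step.
Mat bg = Cv2.ImRead("background.png", LoadMode.GrayScale);
Mat frame = Cv2.ImRead("frame.png", LoadMode.GrayScale);
Mat fg = new Mat();

Cv2.Absdiff(frame, bg, fg);                               // per-pixel |frame - bg|
Mat mask = fg.Threshold(40, 255.0, ThresholdType.Binary); // binarize the difference
Cv2.ImShow("foreground", mask);
Cv2.WaitKey();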
Example 3: GetKeyPoints
#if LANG_JP
/// <summary>
/// StarDetectorアルゴリズムによりキーポイントを取得する
/// </summary>
/// <param name="image">8ビット グレースケールの入力画像</param>
/// <returns></returns>
#else
/// <summary>
/// Retrieves keypoints using the StarDetector algorithm.
/// </summary>
/// <param name="image">The input 8-bit grayscale image</param>
/// <returns></returns>
#endif
public KeyPoint[] GetKeyPoints(Mat image)
{
    if (image == null)
        throw new ArgumentNullException("image");
    using (CvMemStorage storage = new CvMemStorage(0))
    {
        IntPtr ptr = CvInvoke.cvGetStarKeypoints(image.ToCvMat().CvPtr, storage.CvPtr, _p);
        if (ptr == IntPtr.Zero)
        {
            return new KeyPoint[0];
        }
        CvSeq<CvStarKeypoint> keypoints = new CvSeq<CvStarKeypoint>(ptr);
        KeyPoint[] result = new KeyPoint[keypoints.Total];
        for (int i = 0; i < keypoints.Total; i++)
        {
            CvStarKeypoint kpt = keypoints[i].Value;
            result[i] = new KeyPoint(kpt.Pt, (float)kpt.Size, -1.0f, kpt.Response, 0);
        }
        return result;
    }
}
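A hedged usage sketch for this method ('detector' stands for an instance of the surrounding StarDetector-style class, and the image path is a placeholder): pass an 8-bit grayscale Mat and inspect the returned keypoints.
using (Mat gray = new Mat("lenna.png", LoadMode.GrayScale))
{
    // ToCvMat happens inside GetKeyPoints, bridging to the C-API detector.
    KeyPoint[] keypoints = detector.GetKeyPoints(gray);
    foreach (KeyPoint kp in keypoints)
        Console.WriteLine("({0}, {1}) size={2} response={3}", kp.Pt.X, kp.Pt.Y, kp.Size, kp.Response);
}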
Example 4: PerspectiveProject
// Perspective transform
public Mat PerspectiveProject(Mat srcImg, CvPoint2D32f[] src_Pt, CvPoint2D32f[] dst_Pt)
{
    Mat dstImg = srcImg.Clone();

    // Compute the perspective transform and warp via the C API
    CvMat perspective_matrix = Cv.GetPerspectiveTransform(src_Pt, dst_Pt);
    Cv.WarpPerspective(srcImg.ToCvMat(), dstImg.ToCvMat(), perspective_matrix, Interpolation.Cubic, new CvScalar(255, 0, 0));
    return dstImg;
}
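A hedged usage sketch (the corner coordinates are made up, and srcImg stands for an already-loaded 640x480 Mat): map four hand-picked source corners onto the full image rectangle.
CvPoint2D32f[] srcPt =
{
    new CvPoint2D32f(50, 40),   new CvPoint2D32f(590, 60),
    new CvPoint2D32f(610, 430), new CvPoint2D32f(30, 450),
};
CvPoint2D32f[] dstPt =
{
    new CvPoint2D32f(0, 0),     new CvPoint2D32f(640, 0),
    new CvPoint2D32f(640, 480), new CvPoint2D32f(0, 480),
};
Mat warped = PerspectiveProject(srcImg, srcPt, dstPt);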
Example 5: Update
// Update is called once per frame
void Update()
{
    if (runCalibration)
    {
        if (Input.GetMouseButton(0) || Input.GetMouseButton(1) || Input.GetMouseButton(2))
        {
            if (Input.GetMouseButton(0))
            {
                //Debug.Log(Input.mousePosition);
                GameObject bc = GameObject.FindGameObjectWithTag("BlueCross");
                bc.transform.localPosition = new Vector3(Map(Input.mousePosition.x, Screen.width / 2.0f - 320.0f, Screen.width / 2.0f + 320.0f, 0.0f, 640.0f) - 320.0f, -Map(Input.mousePosition.y, Screen.height / 2.0f + 240.0f, Screen.height / 2.0f - 240.0f, 0.0f, 480.0f) + 240.0f, 0.0f);
            }
            else if (Input.GetMouseButton(1))
            {
                GameObject yc = GameObject.FindGameObjectWithTag("YellowCross");
                yc.transform.localPosition = new Vector3(Map(Input.mousePosition.x, Screen.width / 2.0f - 320.0f, Screen.width / 2.0f + 320.0f, 0.0f, 640.0f) - 320.0f, -Map(Input.mousePosition.y, Screen.height / 2.0f + 240.0f, Screen.height / 2.0f - 240.0f, 0.0f, 480.0f) + 240.0f, 0.0f);
                nextBt = true;
            }
            else if (Input.GetMouseButton(2) && nextBt == true)
            {
                if (addKinectPoint())
                {
                    addProjectorPoint();
                    Debug.Log("Point Added! -> (" + kinectCoordinates.Count + ") ");
                    nextBt = false;
                }
                else
                {
                    Debug.Log("Kinect Point out of bounds!");
                }
            }
        }
        if (Input.GetKeyDown(KeyCode.A))
        {
            //PointerEventData pointer = new PointerEventData(EventSystem.current);
            //pointer.position = Input.mousePosition;
            //List<RaycastResult> raycastResults = new List<RaycastResult>();
            //EventSystem.current.RaycastAll(pointer, raycastResults);
            if (addKinectPoint())
            {
                addProjectorPoint();
                Debug.Log("Point Added! -> " + kinectCoordinates.Count);
            }
            else
            {
                Debug.Log("Kinect Point out of bounds!");
            }
        }
        if (Input.GetKeyDown(KeyCode.S))
        {
            if (kinectCoordinates.Count >= 8)
            {
                Debug.Log("Starting Calibration...");
                findTransformation(kinectCoordinates, projectorCoordinates);
                foundResult = true;
            }
            else
            {
                Debug.Log("Not Enough Points!");
            }
        }
        if (Input.GetKeyDown(KeyCode.D) && foundResult == true)
        {
            showResult = !showResult;
            if (!showResult)
            {
                screenTx.SetPixels32(resetPixels);
                screenTx.Apply(false);
            }
            Debug.Log("Show result toggle: " + showResult);
        }
        if (Input.GetKeyDown(KeyCode.F) && foundResult == true)
        {
            using (CvFileStorage fs = new CvFileStorage("KinectCalibration.xml", null, FileStorageMode.Write))
            {
                string nodeName = "calibResult";
                fs.Write(nodeName, result.ToCvMat());
                nodeName = "kinectPoints";
                Mat kinectPts = new Mat(1, kinectCoordinates.Count, MatType.CV_64FC3);
                for (int i = 0; i < kinectCoordinates.Count; i++)
                {
                    kinectPts.Set<CvPoint3D64f>(0, i, (CvPoint3D64f)kinectCoordinates[i]);
                }
                fs.Write(nodeName, kinectPts.ToCvMat());
                nodeName = "projectorPoints";
                Mat projPts = new Mat(1, projectorCoordinates.Count, MatType.CV_64FC2);
                for (int i = 0; i < projectorCoordinates.Count; i++)
                {
                    projPts.Set<CvPoint2D64f>(0, i, (CvPoint2D64f)projectorCoordinates[i]);
                }
                fs.Write(nodeName, projPts.ToCvMat());
                fs.Dispose();
            }
            Debug.Log("Calib Data saved!");
        }
        if (Input.GetKeyDown(KeyCode.Q))
        {
            delLastPoints();
            //......... the rest of the code is omitted here .........