This article collects typical usage examples of the Mat.Flip method in C#. If you are wondering how to use C# Mat.Flip in practice, how to call it, or what working examples look like, the hand-picked code samples below should help. You can also explore further usage examples of the containing class, Mat.
Three code examples of the Mat.Flip method are shown below; by default they are sorted by popularity.
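Before the collected examples, here is a minimal, self-contained sketch of the method itself. It assumes OpenCvSharp 2.x with the OpenCvSharp and OpenCvSharp.CPlusPlus namespaces (the versions the examples below appear to use), and a hypothetical image file "input.png":

using OpenCvSharp;
using OpenCvSharp.CPlusPlus;

class FlipDemo
{
    static void Main()
    {
        // "input.png" is a placeholder path, not taken from the examples below.
        Mat src = Cv2.ImRead("input.png");
        Mat flippedX  = src.Flip(FlipMode.X);   // flip around the x-axis: upside down
        Mat flippedY  = src.Flip(FlipMode.Y);   // flip around the y-axis: left/right mirror
        Mat flippedXY = src.Flip(FlipMode.XY);  // flip around both axes: 180-degree rotation
        Cv2.ImShow("original", src);
        Cv2.ImShow("flipped both ways", flippedXY);
        Cv2.WaitKey();
    }
}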
Example 1: DoTracking
// FaceTracking
void DoTracking()
{
    //while (running)
    //{
    try
    {
        if (kinect.GetDepthRaw())
        {
            //lock (this)
            //{
            src = DoDepthBuffer(kinect.usersDepthMap, KinectWrapper.GetDepthWidth(), KinectWrapper.GetDepthHeight());
            roi = src.Clone(new OpenCvSharp.CPlusPlus.Rect(roiX, roiY, roiW, roiH));
            roi.ConvertTo(roi, OpenCvSharp.CPlusPlus.MatType.CV_8U, 255.0 / 32000.0);
            Cv2.Subtract(new Mat(roiH, roiW, MatType.CV_8UC1, new Scalar(255)), roi, roi);
            double threshMax = 255.0 - ((255.0 / 32000.0) * ((ushort)srcThreshMax << 3));
            double threshMin = 255.0 - ((255.0 / 32000.0) * ((ushort)srcThreshMin << 3));
            roi = roi.Threshold(threshMin, 255.0, ThresholdType.ToZeroInv);
            roi = roi.Threshold(threshMax, 255.0, ThresholdType.ToZero);
            // Flip up/down dimension and right/left dimension
            if (!FlipUpDownAxis && FlipLeftRightAxis)
                roi.Flip(FlipMode.XY);
            else if (!FlipUpDownAxis)
                roi.Flip(FlipMode.X);
            else if (FlipLeftRightAxis)
                roi.Flip(FlipMode.Y);
            // Apply elliptical mask
            Mat ellipseMask = new Mat(roi.Rows, roi.Cols, OpenCvSharp.CPlusPlus.MatType.CV_8U, new Scalar(0.0));
            Cv2.Ellipse(ellipseMask, new Point(ellipseMaskCenterX, ellipseMaskCenterY), new Size(axisMaskX, axisMaskY), maskAngle, maskStartAngle, maskEndAngle, new Scalar(255.0), -1);
            Cv2.BitwiseAnd(roi, ellipseMask, roi);
            // Remove noise with MorphologyEx (open, then close)
            Mat kernel = Cv2.GetStructuringElement(StructuringElementShape.Ellipse, new Size(3, 3));
            Cv2.MorphologyEx(roi, roi, MorphologyOperation.Open, kernel);
            Cv2.MorphologyEx(roi, roi, MorphologyOperation.Close, kernel);
            // Subtract background
            if (first)
            {
                bg = roi.Clone();
                //bg = bg.Blur(new Size(smoothBlur, smoothBlur));
                first = false;
            }
            fg = bg.Clone();
            //roi = roi.Blur(new Size(smoothBlur, smoothBlur));
            Mat subMask = roi.Clone();
            subMask = subMask.Threshold(smThresh, 255.0, ThresholdType.ToZero);
            //Cv2.ImShow("sm",subMask);
            bg.CopyTo(roi, subMask);
            OpenCvSharp.Cv.AbsDiff(roi.ToCvMat(), bg.ToCvMat(), fg.ToCvMat());
            // Threshold the foreground image
            fgthresh = fg.Threshold(threshold, 255.0, ThresholdType.Binary);
            fgthresh = fgthresh.Blur(new Size(smoothBlur, smoothBlur));
            // Detect blobs
            Mat roiToImg = new Mat(roi.Cols, roi.Rows, MatType.CV_8UC3);
            Mat threshToImg = fgthresh.Clone();
            Cv2.Merge(new Mat[] { roi, roi, roi }, roiToImg);
            IplImage showImg = roiToImg.ToIplImage();
            IplImage fgthreshImg = threshToImg.ToIplImage();
            OpenCvSharp.Blob.CvBlobLib.Label(fgthreshImg, blobs);
            OpenCvSharp.Blob.CvBlobLib.FilterByArea(blobs, blobMinArea, blobMaxArea);
            OpenCvSharp.Blob.CvBlobLib.RenderBlobs(blobs, fgthreshImg, showImg, RenderBlobsMode.Color | RenderBlobsMode.Centroid);
            UpdateTracks(blobs, tracks, blobMinDistance, blobMaxLife);
            //OpenCvSharp.Blob.CvBlobLib.RenderTracks(tracks, fgthreshImg, showImg, RenderTracksMode.BoundingBox | RenderTracksMode.Id);
            RenderTracks(showImg);
            //Cv.ShowImage("thres", fgthreshImg);
            Cv.ShowImage("showBlob", showImg);
            // Check blob actions
            //Debug.Log(tracks.Count);
            //}
        }
    }
    catch (System.Exception e)
    {
        //throw e;
        Debug.Log(e.Message + " " + e.StackTrace);
    }
    //}
}
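One detail worth noting about Example 1 (and Example 2 below): the flip calls do not assign their result anywhere, while Example 3 writes outp = outp.Flip(...). If Mat.Flip returns a new, flipped Mat rather than flipping in place, which is what the assignment in Example 3 suggests, the unassigned calls above would have no visible effect and would need one of the following forms instead (a hedged sketch, not a change to the collected code):

// Reassign the fluent result ...
roi = roi.Flip(FlipMode.X);
// ... or use the static overload, which writes into the destination Mat you pass in.
Cv2.Flip(roi, roi, FlipMode.X);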
Example 2: Texture2DToMat
// Convert a Unity Texture2D to an OpenCV Mat.
// This uses Adcock's parallel C# code to parallelize the conversion and make it faster;
// I found execution dropped from 180 ms per frame to 70 ms per frame with parallelization.
void Texture2DToMat(Texture2D tex, Mat m)
{
    //float startTime = Time.realtimeSinceStartup;
    Color[] pixels = tex.GetPixels();
    // Parallel for loop
    Parallel.For(0, imHeight, i =>
    {
        for (var j = 0; j < imWidth; j++)
        {
            var pixel = pixels[j + i * imWidth];
            var col = new CvScalar
            {
                Val0 = (double)pixel.b * 255,
                Val1 = (double)pixel.g * 255,
                Val2 = (double)pixel.r * 255
            };
            m.Set(i, j, col);
        }
    });
    // CvScalar col;
    // Color pixel;
    // int i, j;
    //
    // // Non-parallelized code
    // for (i = 0; i < imHeight; i++) {
    //     for (j = 0; j < imWidth; j++) {
    //         pixel = pixels [j + i * imWidth];
    //
    //         col = new CvScalar
    //         {
    //             Val0 = (double)pixel.b * 255,
    //             Val1 = (double)pixel.g * 255,
    //             Val2 = (double)pixel.r * 255
    //         };
    //
    //         videoSourceImage.Set2D (i, j, col);
    //     }
    //
    // }
    // Flip up/down dimension and right/left dimension
    if (!FlipUpDownAxis && FlipLeftRightAxis)
        m.Flip(FlipMode.XY);
    else if (!FlipUpDownAxis)
        m.Flip(FlipMode.X);
    else if (FlipLeftRightAxis)
        m.Flip(FlipMode.Y);
    // Test difference in time between parallel and non-parallel code
    //Debug.Log (Time.realtimeSinceStartup - startTime);
}
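The same three-way branch on FlipUpDownAxis and FlipLeftRightAxis appears in all three examples. A hypothetical helper (not part of the collected code) that centralizes the branch, assuming the return value is reassigned as in Example 3, could look like this:

// Hypothetical helper: pick the FlipMode from the two configuration flags
// and return the (possibly) flipped matrix.
private Mat ApplyConfiguredFlip(Mat m)
{
    if (!FlipUpDownAxis && FlipLeftRightAxis)
        return m.Flip(FlipMode.XY);   // flip around both axes
    if (!FlipUpDownAxis)
        return m.Flip(FlipMode.X);    // flip up/down only
    if (FlipLeftRightAxis)
        return m.Flip(FlipMode.Y);    // flip left/right only
    return m;                         // no flip requested
}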
Example 3: DoDepthBuffer
// Get a depth matrix from a Kinect depth frame (CV_16U here; Example 1 converts it to CV_8U later)
private Mat DoDepthBuffer(ushort[] depthData, int width, int height)
{
    Mat outp = new Mat(height, width, OpenCvSharp.CPlusPlus.MatType.CV_16U, depthData);
    // Flip up/down dimension and right/left dimension
    if (!FlipUpDownAxis && FlipLeftRightAxis)
        outp = outp.Flip(FlipMode.XY);
    else if (!FlipUpDownAxis)
        outp = outp.Flip(FlipMode.X);
    else if (FlipLeftRightAxis)
        outp = outp.Flip(FlipMode.Y);
    return outp;
}
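Example 1 shows how the CV_16U matrix returned by DoDepthBuffer is typically consumed. A condensed usage sketch (field and helper names taken from Example 1, display call added for illustration) might be:

// Scale the 16-bit Kinect depth values down to an 8-bit image for display,
// mirroring the ConvertTo call in Example 1.
Mat depth16 = DoDepthBuffer(kinect.usersDepthMap,
                            KinectWrapper.GetDepthWidth(),
                            KinectWrapper.GetDepthHeight());
Mat depth8 = new Mat();
depth16.ConvertTo(depth8, MatType.CV_8U, 255.0 / 32000.0);
Cv2.ImShow("depth", depth8);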