This article collects representative usage examples of the C# Mat.Save method. If you are wondering what Mat.Save does in C#, how to call it, or want to see it used in real code, the curated examples below should help; from here you can also explore the other members of the Mat class.
The following shows 5 code examples of Mat.Save, ordered by popularity.
Example 1: Sticher
public static bool Sticher(IEnumerable<string> fileList, string saveFileLocation)
{
    // Materialize the images up front so the same instances are disposed in the finally block
    // (a deferred LINQ query would create new Image objects on every enumeration).
    var imageArray = fileList.Select(fileName => new Image<Bgr, byte>(fileName)).ToArray();
    try
    {
        using (var stitcher = new Stitcher(false))
        using (var vm = new VectorOfMat())
        using (var result = new Mat())
        {
            vm.Push(imageArray);
            stitcher.Stitch(vm, result);
            result.Save(saveFileLocation);
        }
        return true;
    }
    catch (Exception ex)
    {
        Logger.Error("Failed to stitch!", ex);
        return false;
    }
    finally
    {
        foreach (Image<Bgr, byte> img in imageArray)
        {
            img.Dispose();
        }
    }
}
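For context, here is a minimal, hypothetical call site for the Sticher helper above; the file names and the output path are placeholders, not part of the original example.

var inputs = new[] { "left.jpg", "middle.jpg", "right.jpg" };   // hypothetical input images
bool ok = Sticher(inputs, "panorama.jpg");                       // writes the stitched result via Mat.Save
if (!ok)
{
    Console.WriteLine("Stitching failed; see the log for details.");
}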
Example 2: Copy32BitDataToImage
private void Copy32BitDataToImage(int width, int height, byte[] textData)
{
    Stopwatch stopwatch = Stopwatch.StartNew();
    // Pin the managed byte array so the Mat can wrap it without copying.
    GCHandle handle = GCHandle.Alloc(textData, GCHandleType.Pinned);
    try
    {
        originalMat = new Mat(new Size(width, height), Emgu.CV.CvEnum.DepthType.Cv8U, 4, handle.AddrOfPinnedObject(), width * 4);
        dataTextBox.AppendText(String.Format("Emgu took: {0} ms{1}", stopwatch.ElapsedMilliseconds, Environment.NewLine));
        originalMat.Save("mat.bmp");
    }
    finally
    {
        // Keep the buffer pinned until the Mat has been written out:
        // the Mat only wraps the pointer, it does not copy the data.
        handle.Free();
    }
}
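A hypothetical caller for Copy32BitDataToImage might look like the sketch below; it assumes the buffer holds 32-bit BGRA pixels (width * height * 4 bytes), which is what the Mat constructor above expects.

int width = 640, height = 480;                 // hypothetical frame size
byte[] pixels = new byte[width * height * 4];  // 4 bytes per pixel: B, G, R, A
// ... fill pixels from a capture device or a bitmap ...
Copy32BitDataToImage(width, height, pixels);   // saves the frame as mat.bmp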
Example 3: saveGrayAndThreshold
private string saveGrayAndThreshold(string path, Bitmap bmp)
{
    if (bmp == null)
        return string.Empty;

    // Convert the bitmap to grayscale.
    // (Alternative preprocessing steps such as DetailEnhance, AbsDiff against a reference
    //  frame, and Canny were tried and left commented out in the original source.)
    Image<Bgr, Byte> imgRGB = new Image<Bgr, Byte>(bmp);
    Image<Gray, Byte> imgGray = imgRGB.Convert<Gray, Byte>();

    // Smooth the gray image with a pyramid down/up pass before saving it.
    Mat smallGrayFrame = new Mat();
    CvInvoke.PyrDown(imgGray, smallGrayFrame);
    Mat smoothedGrayFrame = new Mat();
    CvInvoke.PyrUp(smallGrayFrame, smoothedGrayFrame);

    smoothedGrayFrame.Save(path + "Grayimg.jpg");
    return path + "Grayimg.jpg";

    // A thresholded image could be used instead if the gray image is not suitable for analysis:
    //Image<Gray, Byte> imgThreshold = imgGray.ThresholdBinary(new Gray(128), new Gray(255));
    //imgThreshold.Save(path + "thimg.jpg");
}
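A minimal, hypothetical call of saveGrayAndThreshold, assuming an input image on disk and an existing output directory (both names are placeholders):

using (var bmp = new Bitmap("frame.jpg"))   // hypothetical source image
{
    string grayPath = saveGrayAndThreshold(@"output\", bmp);
    Console.WriteLine("Smoothed gray image written to: " + grayPath);
}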
Example 4: BinaryImage
async Task BinaryImage(Mat gray, int number, Mat canny = null, Mat color = null)
{
    var image = gray;

    // Edge-preserving smoothing, pushed to the UI list and saved to disk.
    var imageData = EdgePreservingSmoothingBW(gray, 5);
    using (Matrix<byte> edgeSmoothingImage = new Matrix<byte>(imageData))
    using (Mat cannyImage = new Mat())
    {
        await Dispatcher.BeginInvoke(_addImageToTheList, edgeSmoothingImage.Mat);
        edgeSmoothingImage.Save("edgeSmoothingImage" + number + ".jpg");
        await Dispatcher.BeginInvoke(_addImageToTheList, image);

        // Contrast stretching before the edge detectors run.
        var increasedContrastArray = ChangeContrast(image, 80);
        using (var changedContrastImg = new Image<Gray, byte>(increasedContrastArray))
        {
            await Dispatcher.BeginInvoke(_addImageToTheList, changedContrastImg.Mat);
            changedContrastImg.Save("changedContrastImg" + number + ".jpg");

            // Approximate the gradient magnitude from the X and Y Sobel responses.
            Matrix<byte> sobelMatrix = new Matrix<byte>(image.Size);
            var sobelX = new Mat(changedContrastImg.Size, DepthType.Cv8U, 1);
            var sobelY = new Mat(changedContrastImg.Size, DepthType.Cv8U, 1);
            CvInvoke.Sobel(changedContrastImg, sobelX, DepthType.Cv8U, 1, 0);
            CvInvoke.Sobel(changedContrastImg, sobelY, DepthType.Cv8U, 0, 1);
            for (int rowIndex = 0; rowIndex < changedContrastImg.Rows; rowIndex++)
            {
                for (int columnIndex = 0; columnIndex < changedContrastImg.Cols; columnIndex++)
                {
                    var rX = sobelX.GetData(rowIndex, columnIndex)[0];
                    var rY = sobelY.GetData(rowIndex, columnIndex)[0];
                    sobelMatrix[rowIndex, columnIndex] = ToByte(Math.Sqrt(rX * rX + rY * rY));
                }
            }
            //CvInvoke.Threshold(sobelMatrix, sobelMatrix, 170, 255, ThresholdType.Binary);
            await Dispatcher.BeginInvoke(_addImageToTheList, sobelMatrix.Mat);
            sobelMatrix.Save("sobelMatrix" + number + ".bmp");

            // Laplacian edges, smoothed and thresholded, saved as intermediate results.
            CvInvoke.Laplacian(image, sobelMatrix, DepthType.Cv8U, 3, 1, 0, BorderType.Default);
            sobelMatrix.Save("laplacian" + number + ".bmp");
            //CvInvoke.Threshold(sobelMatrix, sobelMatrix, 170, 255, ThresholdType.Binary); // 170-190
            //CvInvoke.AdaptiveThreshold(sobelMatrix, sobelMatrix, 255, AdaptiveThresholdType.MeanC, ThresholdType.Binary, 11, 2);
            var edgeSmoothBWArray = EdgePreservingSmoothingBW(sobelMatrix.Mat, 5);
            var edgeMatrix = new Matrix<byte>(edgeSmoothBWArray);
            edgeMatrix.Save("laplacian-edgeMatrix" + number + ".bmp");
            var thresholdEdge = CustomThreshold(edgeMatrix, 13);
            Matrix<byte> thresholdEdgeMatrix = new Matrix<byte>(thresholdEdge);
            thresholdEdgeMatrix.Save("laplacian-threshold-2-" + number + ".bmp");
            sobelMatrix.Save("laplacian-threshold" + number + ".bmp");

            // Canny edges and structuring elements for optional morphological clean-up.
            CvInvoke.Canny(changedContrastImg, cannyImage, 150, 224, 3, false);
            await Dispatcher.BeginInvoke(_addImageToTheList, cannyImage);
            Matrix<byte> mask = new Matrix<byte>(image.Size);
            int dilSize = 2;
            Mat se1 = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new System.Drawing.Size(2 * dilSize + 1, 2 * dilSize + 1), new System.Drawing.Point(dilSize, dilSize));
            dilSize = 1;
            Mat se2 = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new System.Drawing.Size(2 * dilSize + 1, 2 * dilSize + 1), new System.Drawing.Point(dilSize, dilSize));
            //CvInvoke.MorphologyEx(sobelMatrix, mask, MorphOp.Close, se1, new System.Drawing.Point(0, 0), 1, BorderType.Default, new MCvScalar(255, 0, 0, 255));
            //......... remaining code omitted .........
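The per-pixel Sqrt loop above is the slowest part of Example 4. As an alternative sketch that is not part of the original code, the gradient magnitude can be approximated entirely with built-in Emgu CV calls; this assumes a single-channel 8-bit input Mat named gray and writes the result with Mat.Save.

using (Mat gradX = new Mat())
using (Mat gradY = new Mat())
using (Mat absX = new Mat())
using (Mat absY = new Mat())
using (Mat magnitude = new Mat())
{
    // Compute signed 16-bit gradients so negative responses are not clipped.
    CvInvoke.Sobel(gray, gradX, DepthType.Cv16S, 1, 0);
    CvInvoke.Sobel(gray, gradY, DepthType.Cv16S, 0, 1);
    CvInvoke.ConvertScaleAbs(gradX, absX, 1.0, 0.0);
    CvInvoke.ConvertScaleAbs(gradY, absY, 1.0, 0.0);
    // (|gx| + |gy|) / 2 is a common, scaled approximation of sqrt(gx^2 + gy^2)
    // that stays inside the 8-bit range.
    CvInvoke.AddWeighted(absX, 0.5, absY, 0.5, 0.0, magnitude);
    magnitude.Save("sobel-approx.bmp");
}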
Example 5: FindBlobs
VectorOfVectorOfPoint FindBlobs(Mat binary, int index)
{
    VectorOfVectorOfPoint blobs = new VectorOfVectorOfPoint();

    // Fill label_image with the blobs:
    //   0  - background
    //   1  - unlabelled foreground
    //   2+ - labelled foreground
    Mat label_image = new Mat();
    binary.ConvertTo(label_image, DepthType.Cv8U);

    int label_count = 2; // starts at 2 because 0 and 1 are already used
    for (int y = 0; y < label_image.Rows; y++)
    {
        for (int x = 0; x < label_image.Cols; x++)
        {
            var val = label_image.GetData(y, x)[0];
            if (val != 255)
            {
                continue;
            }
            System.Drawing.Rectangle rect;
            CvInvoke.FloodFill(label_image, new Mat(), new System.Drawing.Point(x, y), new MCvScalar(label_count), out rect, new MCvScalar(0), new MCvScalar(0), Connectivity.FourConnected, FloodFillType.Default);
            //cv::floodFill(label_image, cv::Point(x,y), label_count, &rect, 0, 0, 4);

            // Collect every pixel inside the filled bounding box that carries the current label.
            VectorOfPoint blob = new VectorOfPoint();
            for (int i = rect.Y; i < (rect.Y + rect.Height); i++)
            {
                for (int j = rect.X; j < (rect.X + rect.Width); j++)
                {
                    var val2 = label_image.GetData(i, j)[0];   // read pixel (j, i), not the flood-fill seed
                    if (val2 != label_count)
                    {
                        continue;
                    }
                    blob.Push(new System.Drawing.Point[] { new System.Drawing.Point(j, i) });
                }
            }
            blobs.Push(blob);
            label_count++;
        }
    }
    label_image.Save("labeled" + index + ".bmp");
    return blobs;
}
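A hypothetical call of FindBlobs, assuming a grayscale Mat named grayImage (a placeholder) that gets thresholded so foreground pixels become exactly 255, as the loop above expects:

using (Mat binary = new Mat())
{
    // Binarize the hypothetical grayscale image: foreground = 255, background = 0.
    CvInvoke.Threshold(grayImage, binary, 128, 255, ThresholdType.Binary);
    VectorOfVectorOfPoint blobs = FindBlobs(binary, 0);   // also writes labeled0.bmp via Mat.Save
    Console.WriteLine("Found " + blobs.Size + " blobs.");
}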