This article collects typical usage examples of the OpenCvSharp.Mat.CopyTo method in C#. If you are wondering what Mat.CopyTo does, how to call it, or want to see it used in real code, the curated examples below may help. You can also explore further usage of its containing class, OpenCvSharp.Mat.
The following presents six code examples of Mat.CopyTo, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps surface better C# examples.
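Before the examples, here is a minimal, self-contained sketch of the two overloads that appear throughout them: a full copy, and a copy restricted to the nonzero pixels of an 8-bit mask. The file name and threshold values are placeholders, not taken from the examples below.

using OpenCvSharp;

class CopyToDemo
{
    static void Main()
    {
        using (var src = new Mat("input.png", ImreadModes.Color))   // hypothetical input file
        using (var dst = new Mat())
        using (var mask = new Mat())
        {
            // Build a single-channel mask: pixels brighter than 128 become 255
            Cv2.CvtColor(src, mask, ColorConversionCodes.BGR2GRAY);
            Cv2.Threshold(mask, mask, 128, 255, ThresholdTypes.Binary);

            src.CopyTo(dst);        // full copy (dst is reallocated to match src)
            src.CopyTo(dst, mask);  // masked copy: only pixels where mask != 0 are written
        }
    }
}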
Example 1: FilterByLabels
/// <summary>
/// Filter an image with the specified label values.
/// </summary>
/// <param name="src">Source image.</param>
/// <param name="dst">Destination image.</param>
/// <param name="labelValues">Label values to keep.</param>
/// <returns>Filtered image.</returns>
public Mat FilterByLabels(Mat src, Mat dst, IEnumerable<int> labelValues)
{
    if (src == null)
        throw new ArgumentNullException("src");
    if (dst == null)
        throw new ArgumentNullException("dst");
    if (labelValues == null)
        throw new ArgumentNullException("labelValues");
    int[] labelArray = EnumerableEx.ToArray(labelValues);
    if (labelArray.Length == 0)
        throw new ArgumentException("empty labelValues");

    foreach (int labelValue in labelArray)
    {
        if (labelValue < 0 || labelValue >= LabelCount)
            throw new ArgumentException("0 <= x < LabelCount");
    }

    // Prepare a mask Mat and use it to cut out the selected labels
    using (Mat mask = GetLabelMask(labelArray[0]))
    {
        for (int i = 1; i < labelArray.Length; i++)
        {
            using (var maskI = GetLabelMask(labelArray[i]))
            {
                Cv2.BitwiseOr(mask, maskI, mask);
            }
        }
        src.CopyTo(dst, mask);
        return dst;
    }
}
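This method depends on LabelCount and GetLabelMask from its containing class, which is a connected-components labeling result. A hedged usage sketch, assuming the result comes from OpenCvSharp's Cv2.ConnectedComponentsEx (the construction shown here is an assumption, not part of the example above):

using (var src = new Mat("blobs.png", ImreadModes.Color))      // hypothetical input
using (var binary = new Mat())
using (var dst = new Mat())
{
    Cv2.CvtColor(src, binary, ColorConversionCodes.BGR2GRAY);
    Cv2.Threshold(binary, binary, 128, 255, ThresholdTypes.Binary);

    ConnectedComponents cc = Cv2.ConnectedComponentsEx(binary);
    cc.FilterByLabels(src, dst, new[] { 1, 2 });  // keep only blobs labeled 1 and 2 (label 0 is the background)
}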
Example 2: VoteForUniqueness
private static void VoteForUniqueness(DMatch[][] matches, Mat mask, float uniqnessThreshold = 0.80f)
{
    byte[] maskData = new byte[matches.Length];
    GCHandle maskHandle = GCHandle.Alloc(maskData, GCHandleType.Pinned);
    using (Mat m = new Mat(matches.Length, 1, MatType.CV_8U, maskHandle.AddrOfPinnedObject()))
    {
        mask.CopyTo(m);
        for (int i = 0; i < matches.Length; i++)
        {
            // This is also known as NNDR (Nearest Neighbor Distance Ratio)
            if ((matches[i][0].Distance / matches[i][1].Distance) <= uniqnessThreshold)
                maskData[i] = 255;
            else
                maskData[i] = 0;
        }
        m.CopyTo(mask);
    }
    maskHandle.Free();
}
Example 3: Run
public void Run()
{
    using (var img1 = new Mat(FilePath.Image.SurfBox))
    using (var img2 = new Mat(FilePath.Image.SurfBoxinscene))
    using (var descriptors1 = new Mat())
    using (var descriptors2 = new Mat())
    using (var matcher = new BFMatcher(NormTypes.L2SQR))
    using (var kaze = KAZE.Create())
    {
        KeyPoint[] keypoints1, keypoints2;
        kaze.DetectAndCompute(img1, null, out keypoints1, descriptors1);
        kaze.DetectAndCompute(img2, null, out keypoints2, descriptors2);

        DMatch[][] matches = matcher.KnnMatch(descriptors1, descriptors2, 2);
        using (Mat mask = new Mat(matches.Length, 1, MatType.CV_8U))
        {
            mask.SetTo(new Scalar(255));
            int nonZero = Cv2.CountNonZero(mask);
            VoteForUniqueness(matches, mask);
            nonZero = Cv2.CountNonZero(mask);
            nonZero = VoteForSizeAndOrientation(keypoints2, keypoints1, matches, mask, 1.5f, 20);

            List<Point2f> obj = new List<Point2f>();
            List<Point2f> scene = new List<Point2f>();
            List<DMatch> goodMatchesList = new List<DMatch>();
            // Iterate through the mask, keeping only the nonzero entries because those are the surviving matches
            MatIndexer<byte> maskIndexer = mask.GetGenericIndexer<byte>();
            for (int i = 0; i < mask.Rows; i++)
            {
                if (maskIndexer[i] > 0)
                {
                    obj.Add(keypoints1[matches[i][0].QueryIdx].Pt);
                    scene.Add(keypoints2[matches[i][0].TrainIdx].Pt);
                    goodMatchesList.Add(matches[i][0]);
                }
            }

            List<Point2d> objPts = obj.ConvertAll(Point2fToPoint2d);
            List<Point2d> scenePts = scene.ConvertAll(Point2fToPoint2d);
            if (nonZero >= 4)
            {
                Mat homography = Cv2.FindHomography(objPts, scenePts, HomographyMethods.Ransac, 1.5, mask);
                nonZero = Cv2.CountNonZero(mask);
                if (homography != null)
                {
                    Point2f[] objCorners = { new Point2f(0, 0),
                                             new Point2f(img1.Cols, 0),
                                             new Point2f(img1.Cols, img1.Rows),
                                             new Point2f(0, img1.Rows) };
                    Point2d[] sceneCorners = MyPerspectiveTransform3(objCorners, homography);

                    // Concatenate the two images horizontally into one output canvas
                    using (Mat img3 = new Mat(Math.Max(img1.Height, img2.Height), img2.Width + img1.Width, MatType.CV_8UC3))
                    using (Mat left = new Mat(img3, new Rect(0, 0, img1.Width, img1.Height)))
                    using (Mat right = new Mat(img3, new Rect(img1.Width, 0, img2.Width, img2.Height)))
                    {
                        img1.CopyTo(left);
                        img2.CopyTo(right);

                        byte[] maskBytes = new byte[mask.Rows * mask.Cols];
                        mask.GetArray(0, 0, maskBytes);
                        Cv2.DrawMatches(img1, keypoints1, img2, keypoints2, goodMatchesList, img3, Scalar.All(-1), Scalar.All(-1), maskBytes, DrawMatchesFlags.NotDrawSinglePoints);

                        // Draw the projected object outline in the scene half of the composite image
                        List<List<Point>> listOfListOfPoint2D = new List<List<Point>>();
                        List<Point> listOfPoint2D = new List<Point>();
                        listOfPoint2D.Add(new Point(sceneCorners[0].X + img1.Cols, sceneCorners[0].Y));
                        listOfPoint2D.Add(new Point(sceneCorners[1].X + img1.Cols, sceneCorners[1].Y));
                        listOfPoint2D.Add(new Point(sceneCorners[2].X + img1.Cols, sceneCorners[2].Y));
                        listOfPoint2D.Add(new Point(sceneCorners[3].X + img1.Cols, sceneCorners[3].Y));
                        listOfListOfPoint2D.Add(listOfPoint2D);
                        img3.Polylines(listOfListOfPoint2D, true, Scalar.LimeGreen, 2);

                        // This works too:
                        //Cv2.Line(img3, sceneCorners[0] + new Point2d(img1.Cols, 0), sceneCorners[1] + new Point2d(img1.Cols, 0), Scalar.LimeGreen);
                        //Cv2.Line(img3, sceneCorners[1] + new Point2d(img1.Cols, 0), sceneCorners[2] + new Point2d(img1.Cols, 0), Scalar.LimeGreen);
                        //Cv2.Line(img3, sceneCorners[2] + new Point2d(img1.Cols, 0), sceneCorners[3] + new Point2d(img1.Cols, 0), Scalar.LimeGreen);
                        //Cv2.Line(img3, sceneCorners[3] + new Point2d(img1.Cols, 0), sceneCorners[0] + new Point2d(img1.Cols, 0), Scalar.LimeGreen);

                        img3.SaveImage("Kaze_Output.png");
                        Window.ShowImages(img3);
                    }
                }
            }
        }
    }
}
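The example calls two helpers that are not shown here: MyPerspectiveTransform3 and Point2fToPoint2d. A hedged sketch of what they might look like, assuming the standard Cv2.PerspectiveTransform overload for Point2f arrays (these are illustrative reconstructions, not the sample's actual code):

// Project the object corners into the scene using the homography and return them as Point2d
static Point2d[] MyPerspectiveTransform3(Point2f[] src, Mat homography)
{
    Point2f[] projected = Cv2.PerspectiveTransform(src, homography);
    return Array.ConvertAll(projected, p => new Point2d(p.X, p.Y));
}

// Simple converter used with List<Point2f>.ConvertAll above
static Point2d Point2fToPoint2d(Point2f p)
{
    return new Point2d(p.X, p.Y);
}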
Example 4: VoteForSizeAndOrientation
static int VoteForSizeAndOrientation(KeyPoint[] modelKeyPoints, KeyPoint[] observedKeyPoints, DMatch[][] matches, Mat mask, float scaleIncrement, int rotationBins)
{
    int idx = 0;
    int nonZeroCount = 0;
    byte[] maskMat = new byte[mask.Rows];
    GCHandle maskHandle = GCHandle.Alloc(maskMat, GCHandleType.Pinned);
    using (Mat m = new Mat(mask.Rows, 1, MatType.CV_8U, maskHandle.AddrOfPinnedObject()))
    {
        mask.CopyTo(m);
        List<float> logScale = new List<float>();
        List<float> rotations = new List<float>();
        double s, maxS, minS, r;
        maxS = -1.0e-10f; minS = 1.0e10f;

        // If you get an exception here, it's because you're passing in the model and observed keypoints backwards. Just switch the order.
        for (int i = 0; i < maskMat.Length; i++)
        {
            if (maskMat[i] > 0)
            {
                KeyPoint observedKeyPoint = observedKeyPoints[i];
                KeyPoint modelKeyPoint = modelKeyPoints[matches[i][0].TrainIdx];
                s = Math.Log10(observedKeyPoint.Size / modelKeyPoint.Size);
                logScale.Add((float)s);
                maxS = s > maxS ? s : maxS;
                minS = s < minS ? s : minS;
                r = observedKeyPoint.Angle - modelKeyPoint.Angle;
                r = r < 0.0f ? r + 360.0f : r;
                rotations.Add((float)r);
            }
        }

        int scaleBinSize = (int)Math.Ceiling((maxS - minS) / Math.Log10(scaleIncrement));
        if (scaleBinSize < 2)
            scaleBinSize = 2;
        float[] scaleRanges = { (float)minS, (float)(minS + scaleBinSize + Math.Log10(scaleIncrement)) };

        using (MatOfFloat scalesMat = new MatOfFloat(rows: logScale.Count, cols: 1, data: logScale.ToArray()))
        using (MatOfFloat rotationsMat = new MatOfFloat(rows: rotations.Count, cols: 1, data: rotations.ToArray()))
        using (MatOfFloat flagsMat = new MatOfFloat(logScale.Count, 1))
        using (Mat hist = new Mat())
        {
            flagsMat.SetTo(new Scalar(0.0f));
            float[] flagsMatFloat1 = flagsMat.ToArray();

            int[] histSize = { scaleBinSize, rotationBins };
            float[] rotationRanges = { 0.0f, 360.0f };
            int[] channels = { 0, 1 };
            Rangef[] ranges = { new Rangef(scaleRanges[0], scaleRanges[1]), new Rangef(rotations.Min(), rotations.Max()) };
            double minVal, maxVal;

            Mat[] arrs = { scalesMat, rotationsMat };
            Cv2.CalcHist(arrs, channels, null, hist, 2, histSize, ranges);
            Cv2.MinMaxLoc(hist, out minVal, out maxVal);
            Cv2.Threshold(hist, hist, maxVal * 0.5, 0, ThresholdTypes.Tozero);
            Cv2.CalcBackProject(arrs, channels, hist, flagsMat, ranges);

            MatIndexer<float> flagsMatIndexer = flagsMat.GetIndexer();
            for (int i = 0; i < maskMat.Length; i++)
            {
                if (maskMat[i] > 0)
                {
                    if (flagsMatIndexer[idx++] != 0.0f)
                    {
                        nonZeroCount++;
                    }
                    else
                        maskMat[i] = 0;
                }
            }
            m.CopyTo(mask);
        }
    }
    maskHandle.Free();
    return nonZeroCount;
}
Example 5: Extract
public static Mat Extract(Mat srcMat, ColorConversionCodes code,
                          int ch1Lower, int ch1Upper,
                          int ch2Lower, int ch2Upper,
                          int ch3Lower, int ch3Upper)
{
    // Build a binary mask of the pixels whose three channels fall inside the given ranges
    var maskMat = ExtractMask(srcMat,
                              code,
                              ch1Lower, ch1Upper,
                              ch2Lower, ch2Upper,
                              ch3Lower, ch3Upper);
    // Copy only the masked pixels of srcMat; the mask Mat doubles as the destination
    srcMat.CopyTo(maskMat, maskMat);
    return maskMat;
}
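ExtractMask itself is not shown in this example. A minimal sketch of what such a helper typically does, assuming a straightforward color-space conversion followed by an InRange threshold (it ignores wrap-around ranges such as hue crossing 0):

// Hypothetical ExtractMask: convert to the requested color space, then keep pixels
// whose three channels all fall inside [chNLower, chNUpper].
static Mat ExtractMask(Mat srcMat, ColorConversionCodes code,
                       int ch1Lower, int ch1Upper,
                       int ch2Lower, int ch2Upper,
                       int ch3Lower, int ch3Upper)
{
    using (var converted = new Mat())
    {
        Cv2.CvtColor(srcMat, converted, code);   // e.g. ColorConversionCodes.BGR2HSV
        var mask = new Mat();
        Cv2.InRange(converted,
                    new Scalar(ch1Lower, ch2Lower, ch3Lower),
                    new Scalar(ch1Upper, ch2Upper, ch3Upper),
                    mask);                        // 255 where all three channels are in range
        return mask;
    }
}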
Example 6: Run
public void Run()
{
    Mat img = Cv2.ImRead(FilePath.Image.Lenna, ImreadModes.GrayScale);

    // Expand the input image to the optimal DFT size; zero values are added on the border
    Mat padded = new Mat();
    int m = Cv2.GetOptimalDFTSize(img.Rows);
    int n = Cv2.GetOptimalDFTSize(img.Cols);
    Cv2.CopyMakeBorder(img, padded, 0, m - img.Rows, 0, n - img.Cols, BorderTypes.Constant, Scalar.All(0));

    // Add to the expanded image another plane filled with zeros (the imaginary part)
    Mat paddedF32 = new Mat();
    padded.ConvertTo(paddedF32, MatType.CV_32F);
    Mat[] planes = { paddedF32, Mat.Zeros(padded.Size(), MatType.CV_32F) };
    Mat complex = new Mat();
    Cv2.Merge(planes, complex);

    // Compute the DFT of the two-channel complex image
    Mat dft = new Mat();
    Cv2.Dft(complex, dft);

    // Compute the magnitude and switch to logarithmic scale
    // => log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2))
    Mat[] dftPlanes;
    Cv2.Split(dft, out dftPlanes); // dftPlanes[0] = Re(DFT(I)), dftPlanes[1] = Im(DFT(I))

    Mat magnitude = new Mat();
    Cv2.Magnitude(dftPlanes[0], dftPlanes[1], magnitude);
    magnitude += Scalar.All(1); // switch to logarithmic scale
    Cv2.Log(magnitude, magnitude);

    // Crop the spectrum if it has an odd number of rows or columns
    Mat spectrum = magnitude[
        new Rect(0, 0, magnitude.Cols & -2, magnitude.Rows & -2)];

    // Rearrange the quadrants of the Fourier image so that the origin is at the image center
    int cx = spectrum.Cols / 2;
    int cy = spectrum.Rows / 2;
    Mat q0 = new Mat(spectrum, new Rect(0, 0, cx, cy));   // Top-Left - create a ROI per quadrant
    Mat q1 = new Mat(spectrum, new Rect(cx, 0, cx, cy));  // Top-Right
    Mat q2 = new Mat(spectrum, new Rect(0, cy, cx, cy));  // Bottom-Left
    Mat q3 = new Mat(spectrum, new Rect(cx, cy, cx, cy)); // Bottom-Right

    // Swap quadrants (Top-Left with Bottom-Right)
    Mat tmp = new Mat();
    q0.CopyTo(tmp);
    q3.CopyTo(q0);
    tmp.CopyTo(q3);

    // Swap quadrants (Top-Right with Bottom-Left)
    q1.CopyTo(tmp);
    q2.CopyTo(q1);
    tmp.CopyTo(q2);

    // Transform the matrix with float values into a viewable image form (floats between 0 and 1)
    Cv2.Normalize(spectrum, spectrum, 0, 1, NormTypes.MinMax);

    // Show the result
    Cv2.ImShow("Input Image", img);
    Cv2.ImShow("Spectrum Magnitude", spectrum);

    // Calculate the inverse DFT and normalize it for display
    Mat inverseTransform = new Mat();
    Cv2.Dft(dft, inverseTransform, DftFlags.Inverse | DftFlags.RealOutput);
    Cv2.Normalize(inverseTransform, inverseTransform, 0, 1, NormTypes.MinMax);
    Cv2.ImShow("Reconstructed by Inverse DFT", inverseTransform);
    Cv2.WaitKey();
}