本文整理汇总了C#中GpuMat类的典型用法代码示例。如果您正苦于以下问题:C# GpuMat类的具体用法?C# GpuMat怎么用?C# GpuMat使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
GpuMat类属于命名空间,在下文中一共展示了GpuMat类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: FindEyes
private Rectangle[] FindEyes(string eyeFileName, CudaImage<Gray, Byte> image)
{
    // Run the CUDA cascade classifier over the whole image and convert the
    // raw GPU detection buffer into managed rectangles.
    using (CudaCascadeClassifier eyeClassifier = new CudaCascadeClassifier(eyeFileName))
    using (GpuMat rawDetections = new GpuMat())
    {
        eyeClassifier.DetectMultiScale(image, rawDetections);
        return eyeClassifier.Convert(rawDetections);
    }
}
示例2: TestGpuMatContinuous
public void TestGpuMatContinuous()
{
    // Skip when no CUDA-capable device is available.
    if (!CudaInvoke.HasCuda)
        return;
    // The final 'true' argument requests a continuous (single-block) GPU
    // allocation, which is what the assertion verifies.
    // BUG FIX: the GpuMat was never disposed, leaking device memory until
    // finalization; dispose it deterministically.
    using (GpuMat<Byte> mat = new GpuMat<byte>(1200, 640, 1, true))
    {
        Assert.IsTrue(mat.IsContinuous);
    }
}
示例3: Calculate
public Mat Calculate(Bitmap referenceBitmap, Bitmap currentBitmap)
{
// Estimates the homography mapping the reference image onto the current image:
// CUDA SURF keypoints/descriptors are extracted for both images, matched with
// a GPU brute-force KNN matcher, and the matches are handed to the solver.
// NOTE(review): depends on instance members 'threshold' and 'k' and on the
// helper 'TryFindHomography', none of which are visible in this excerpt —
// confirm their contracts before changing this method.
Mat homography;
using (var detector = new CudaSURF(threshold))
// Convert both bitmaps to grayscale host images.
using (var model = new Image<Gray, byte>(referenceBitmap))
using (var observed = new Image<Gray, byte>(currentBitmap))
// Upload the model image to GPU memory, then detect and describe its keypoints.
using (var modelMat = new GpuMat(model))
using (var modelKeyPointsRaw = detector.DetectKeyPointsRaw(modelMat))
using (var modelKeyPoints = new VectorOfKeyPoint())
using (var modelDescriptorsRaw = detector.ComputeDescriptorsRaw(modelMat, null, modelKeyPointsRaw))
// Same pipeline for the observed (current) image.
using (var observedMat = new GpuMat(observed))
using (var observedKeyPointsRaw = detector.DetectKeyPointsRaw(observedMat))
using (var observedKeyPoints = new VectorOfKeyPoint())
using (var observedDescriptorsRaw = detector.ComputeDescriptorsRaw(observedMat, null, observedKeyPointsRaw))
using (
var matcher =
new CudaBFMatcher(DistanceType.L2))
using (var matches = new VectorOfVectorOfDMatch())
{
// K-nearest-neighbour match: observed descriptors against the model's.
matcher.KnnMatch(observedDescriptorsRaw, modelDescriptorsRaw, matches, k);
// Download the GPU keypoint buffers into host-side vectors for the solver.
detector.DownloadKeypoints(modelKeyPointsRaw, modelKeyPoints);
detector.DownloadKeypoints(observedKeyPointsRaw, observedKeyPoints);
homography = TryFindHomography(modelKeyPoints, observedKeyPoints, matches);
}
return homography;
}
示例4: Main
static void Main()
{
    Application.EnableVisualStyles();
    Application.SetCompatibleTextRenderingDefault(false);

    // Load the test image, detect pedestrians (GPU when CUDA is available,
    // otherwise the UMat/CPU path), draw the detections and show the result.
    using (Mat image = new Mat("pedestrian.png"))
    {
        long processingTime;
        Rectangle[] detections;
        if (CudaInvoke.HasCuda)
        {
            using (GpuMat gpuImage = new GpuMat(image))
                detections = FindPedestrian.Find(gpuImage, out processingTime);
        }
        else
        {
            using (UMat umatImage = image.GetUMat(AccessType.ReadWrite))
                detections = FindPedestrian.Find(umatImage, out processingTime);
        }

        // Outline every detected pedestrian in red.
        MCvScalar red = new Bgr(Color.Red).MCvScalar;
        foreach (Rectangle detection in detections)
        {
            CvInvoke.Rectangle(image, detection, red);
        }

        // Report which compute device handled the detection.
        string device = CudaInvoke.HasCuda
            ? "GPU"
            : CvInvoke.UseOpenCL ? "OpenCL" : "CPU";
        ImageViewer.Show(
            image,
            String.Format("Pedestrian detection using {0} in {1} milliseconds.",
                device,
                processingTime));
    }
}
示例5: Solve
public Image<Gray, byte> Solve(Image<Gray, byte> left, Image<Gray, byte> right)
{
// Computes a disparity map for a stereo pair on the GPU, filters it, and
// returns the result rescaled into an 8-bit grayscale image.
// NOTE(review): relies on instance members 'algorithm', 'filter', 'min',
// 'max', 'minPosition', 'maxPosition', 'Min' and 'Max' that are not visible
// in this excerpt — in particular the lowercase ref locals written by
// MinMaxLoc versus the capitalized 'Max'/'Min' used for scaling; confirm
// they refer to the same values.
var size = left.Size;
// NOTE(review): Upload() presumably re-allocates leftGpu/rightGpu to match the
// 8-bit input images, making the Cv16S depth requested here irrelevant for
// those two mats — confirm against the GpuMat.Upload contract.
using (var leftGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
using (var rightGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
using (var disparityGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
using (var filteredDisparityGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
using (var filteredDisparity16S = new Mat(size, DepthType.Cv16S, 1))
using (var filteredDisparity8U = new Mat(size, DepthType.Cv8U, 1))
{
// Move both images into device memory.
leftGpu.Upload(left.Mat);
rightGpu.Upload(right.Mat);
// Stereo correspondence, then disparity filtering, both on the GPU.
algorithm.FindStereoCorrespondence(leftGpu, rightGpu, disparityGpu);
filter.Apply(disparityGpu, leftGpu, filteredDisparityGpu);
// Bring the filtered 16-bit disparity back to the host.
filteredDisparityGpu.Download(filteredDisparity16S);
// Record the value range, then stretch it to the full 8-bit range.
CvInvoke.MinMaxLoc(filteredDisparity16S, ref min, ref max, ref minPosition, ref maxPosition);
filteredDisparity16S.ConvertTo(filteredDisparity8U, DepthType.Cv8U, 255.0/(Max - Min));
return new Image<Gray, byte>(filteredDisparity8U.Bitmap);
}
}
示例6: TestCudaImageAsyncOps
public void TestCudaImageAsyncOps()
{
    // Exercises asynchronous GPU operations: enqueues a conversion and a color
    // conversion on a CUDA stream and busy-waits on the CPU until each
    // completes, counting how many polls the busy-wait performed.
    if (CudaInvoke.HasCuda)
    {
        int counter = 0;
        Stopwatch watch = Stopwatch.StartNew();
        using (GpuMat img1 = new GpuMat(3000, 2000, DepthType.Cv8U, 3))
        using (GpuMat img2 = new GpuMat(3000, 2000, DepthType.Cv8U, 3))
        using (GpuMat img3 = new GpuMat())
        using (Stream stream = new Stream())
        using (GpuMat mat1 = new GpuMat())
        {
            // Asynchronous element-wise convert on the stream.
            img1.ConvertTo(mat1, DepthType.Cv8U, 1, 0, stream);
            // BUG FIX: the original guard was 'counter <= int.MaxValue', which
            // is always true for an int, so the increment was never prevented
            // and 'counter' could silently overflow to a negative value.
            while (!stream.Completed)
            {
                if (counter < int.MaxValue) counter++;
            }
            Trace.WriteLine(String.Format("Counter has been incremented {0} times", counter));
            counter = 0;
            // Asynchronous BGR->Gray conversion on the same stream.
            CudaInvoke.CvtColor(img2, img3, CvToolbox.GetColorCvtCode(typeof(Bgr), typeof(Gray)), 1, stream);
            while (!stream.Completed)
            {
                if (counter < int.MaxValue) counter++;
            }
            Trace.WriteLine(String.Format("Counter has been incremented {0} times", counter));
        }
        watch.Stop();
        Trace.WriteLine(String.Format("Total time: {0} milliseconds", watch.ElapsedMilliseconds));
    }
}
示例7: CudaCascadeClassifier
/// <summary>
/// Create a Cuda cascade classifier using the specific file
/// </summary>
/// <param name="fileName">The file to create the classifier from</param>
public CudaCascadeClassifier(String fileName)
{
#if !NETFX_CORE
// Fail fast in debug builds when the cascade file is missing (skipped on
// platforms without System.IO.File access).
Debug.Assert(File.Exists(fileName), String.Format("The Cascade file {0} does not exist.", fileName));
#endif
// Marshal the file name into a native string for the unmanaged constructor.
using (CvString s = new CvString(fileName))
_ptr = CudaInvoke.cudaCascadeClassifierCreate(s);
// Pre-allocate the GPU buffer that receives detection results.
// NOTE(review): 1 x 100, Cv32S, 4 channels looks like room for 100 detection
// rectangles — confirm against DetectMultiScale's expectations.
_buffer = new GpuMat(1, 100, DepthType.Cv32S, 4);
}
示例8: DetectKeyPoints
/// <summary>
/// Detect keypoints in the CudaImage
/// </summary>
/// <param name="img">The image where keypoints will be detected from</param>
/// <param name="mask">The optional mask, can be null if not needed</param>
/// <returns>An array of keypoints</returns>
public MKeyPoint[] DetectKeyPoints(GpuMat img, GpuMat mask)
{
    // Run the raw GPU detection, download the result into a host-side vector,
    // and materialize it as a managed array.
    using (GpuMat rawDetections = DetectKeyPointsRaw(img, mask))
    using (VectorOfKeyPoint hostKeyPoints = new VectorOfKeyPoint())
    {
        DownloadKeypoints(rawDetections, hostKeyPoints);
        return hostKeyPoints.ToArray();
    }
}
示例9: Find
/// <summary>
/// Find the pedestrian in the image
/// </summary>
/// <param name="image">The image</param>
/// <param name="tryUseCuda">If true and a compatible Cuda device is present, the Cuda implementation is used</param>
/// <param name="tryUseOpenCL">If true and an OpenCL-compatible GPU device is present, OpenCL is enabled for the CPU path</param>
/// <param name="processingTime">The pedestrian detection time in milliseconds</param>
/// <returns>The region where pedestrians are detected</returns>
public static Rectangle[] Find(Mat image, bool tryUseCuda, bool tryUseOpenCL, out long processingTime)
{
    Stopwatch watch;
    Rectangle[] regions;
#if !(IOS || NETFX_CORE)
    //check if there is a compatible Cuda device to run pedestrian detection
    if (tryUseCuda && CudaInvoke.HasCuda)
    {
        //this is the Cuda version
        using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8, 8), new Size(8, 8)))
        {
            des.SetSVMDetector(des.GetDefaultPeopleDetector());
            watch = Stopwatch.StartNew();
            using (GpuMat cudaBgr = new GpuMat(image))
            using (GpuMat cudaBgra = new GpuMat())
            using (VectorOfRect vr = new VectorOfRect())
            {
                //the Cuda HOG detector operates on a BGRA image
                CudaInvoke.CvtColor(cudaBgr, cudaBgra, ColorConversion.Bgr2Bgra);
                des.DetectMultiScale(cudaBgra, vr);
                regions = vr.ToArray();
            }
            //BUG FIX: the stopwatch was never stopped on the Cuda path, so the
            //reported time kept running until ElapsedMilliseconds was read.
            watch.Stop();
        }
    }
    else
#endif
    {
        //Many opencl functions require opencl compatible gpu devices.
        //As of opencv 3.0-alpha, opencv will crash if opencl is enable and only opencv compatible cpu device is presented
        //So we need to call CvInvoke.HaveOpenCLCompatibleGpuDevice instead of CvInvoke.HaveOpenCL (which also returns true on a system that only have cpu opencl devices).
        CvInvoke.UseOpenCL = tryUseOpenCL && CvInvoke.HaveOpenCLCompatibleGpuDevice;
        //this is the CPU/OpenCL version
        using (HOGDescriptor des = new HOGDescriptor())
        {
            des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            //load the image to umat so it will automatically use opencl if available
            //BUG FIX: the UMat was never disposed, leaking the OpenCL buffer.
            using (UMat umat = image.ToUMat(AccessType.Read))
            {
                watch = Stopwatch.StartNew();
                MCvObjectDetection[] results = des.DetectMultiScale(umat);
                regions = new Rectangle[results.Length];
                for (int i = 0; i < results.Length; i++)
                    regions[i] = results[i].Rect;
                watch.Stop();
            }
        }
    }
    processingTime = watch.ElapsedMilliseconds;
    return regions;
}
示例10: Find
/// <summary>
/// Find the pedestrian in the image
/// </summary>
/// <param name="image">The image</param>
/// <param name="tryUseCuda">If true and a compatible Cuda device is present, the Cuda implementation is used</param>
/// <param name="processingTime">The pedestrian detection time in milliseconds</param>
/// <returns>The region where pedestrians are detected</returns>
public static Rectangle[] Find(Mat image, bool tryUseCuda, out long processingTime)
{
    Stopwatch watch;
    Rectangle[] regions;
#if !(__IOS__ || NETFX_CORE)
    //check if there is a compatible Cuda device to run pedestrian detection
    if (tryUseCuda && CudaInvoke.HasCuda)
    {
        //this is the Cuda version
        using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8, 8), new Size(8, 8)))
        {
            des.SetSVMDetector(des.GetDefaultPeopleDetector());
            watch = Stopwatch.StartNew();
            using (GpuMat cudaBgr = new GpuMat(image))
            using (GpuMat cudaBgra = new GpuMat())
            using (VectorOfRect vr = new VectorOfRect())
            {
                //the Cuda HOG detector operates on a BGRA image
                CudaInvoke.CvtColor(cudaBgr, cudaBgra, ColorConversion.Bgr2Bgra);
                des.DetectMultiScale(cudaBgra, vr);
                regions = vr.ToArray();
            }
            //BUG FIX: the stopwatch was never stopped on the Cuda path, so the
            //reported time kept running until ElapsedMilliseconds was read.
            watch.Stop();
        }
    }
    else
#endif
    {
        //this is the CPU/OpenCL version
        using (HOGDescriptor des = new HOGDescriptor())
        {
            des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            //load the image to umat so it will automatically use opencl if available
            //BUG FIX: the UMat was never disposed, leaking the OpenCL buffer.
            using (UMat umat = image.ToUMat(AccessType.Read))
            {
                watch = Stopwatch.StartNew();
                MCvObjectDetection[] results = des.DetectMultiScale(umat);
                regions = new Rectangle[results.Length];
                for (int i = 0; i < results.Length; i++)
                    regions[i] = results[i].Rect;
                watch.Stop();
            }
        }
    }
    processingTime = watch.ElapsedMilliseconds;
    return regions;
}
示例11: Find
/// <summary>
/// Find the pedestrian in the image
/// </summary>
/// <param name="image">The image</param>
/// <param name="processingTime">The processing time in milliseconds</param>
/// <returns>The region where pedestrians are detected</returns>
public static Rectangle[] Find(IInputArray image, out long processingTime)
{
    Stopwatch watch;
    Rectangle[] regions;
    using (InputArray iaImage = image.GetInputArray())
    {
#if !(__IOS__ || NETFX_CORE)
        //if the input array is a GpuMat, run the Cuda implementation
        if (iaImage.Kind == InputArray.Type.CudaGpuMat)
        {
            //this is the Cuda version
            using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8, 8), new Size(8, 8)))
            {
                des.SetSVMDetector(des.GetDefaultPeopleDetector());
                watch = Stopwatch.StartNew();
                using (GpuMat cudaBgra = new GpuMat())
                using (VectorOfRect vr = new VectorOfRect())
                {
                    //the Cuda HOG detector operates on a BGRA image
                    CudaInvoke.CvtColor(image, cudaBgra, ColorConversion.Bgr2Bgra);
                    des.DetectMultiScale(cudaBgra, vr);
                    regions = vr.ToArray();
                }
                //BUG FIX: the stopwatch was never stopped on the Cuda path, so
                //the reported time kept running until ElapsedMilliseconds was read.
                watch.Stop();
            }
        }
        else
#endif
        {
            //this is the CPU/OpenCL version
            using (HOGDescriptor des = new HOGDescriptor())
            {
                des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
                watch = Stopwatch.StartNew();
                MCvObjectDetection[] results = des.DetectMultiScale(image);
                regions = new Rectangle[results.Length];
                for (int i = 0; i < results.Length; i++)
                    regions[i] = results[i].Rect;
                watch.Stop();
            }
        }
        processingTime = watch.ElapsedMilliseconds;
        return regions;
    }
}
示例12: Update
/// <summary>
/// The update operator [MOG_GPU::operator()]
/// </summary>
/// <param name="frame">Input video frame (GPU matrix); must not be null.</param>
/// <param name="fgmask">Output foreground mask (GPU matrix, per the parameter name); must not be null.</param>
/// <param name="learningRate">Learning rate forwarded to the native MOG implementation (default 0.0f).</param>
/// <param name="stream">Optional CUDA stream; when null, Stream.Null is substituted.</param>
public void Update(
GpuMat frame, GpuMat fgmask, float learningRate = 0.0f, Stream stream = null)
{
// Guard against use after disposal, then validate inputs before the native
// call (which cannot tolerate null pointers).
if (disposed)
throw new ObjectDisposedException(GetType().Name);
if (frame == null)
throw new ArgumentNullException("frame");
if (fgmask == null)
throw new ArgumentNullException("fgmask");
// Substitute the shared null stream so the native call always gets a valid handle.
stream = stream ?? Stream.Null;
NativeMethods.gpu_MOG_GPU_operator(
ptr, frame.CvPtr, fgmask.CvPtr, learningRate, stream.CvPtr);
// Keep the managed wrappers alive until the native call returns, so the GC
// cannot collect them while their native pointers are still in use.
GC.KeepAlive(frame);
GC.KeepAlive(fgmask);
GC.KeepAlive(stream);
}
示例13: GetBackgroundImage
/// <summary>
/// Computes a background image which is the mean of all background gaussians
/// </summary>
/// <param name="backgroundImage">Destination GPU matrix that receives the background image; must not be null.</param>
/// <param name="stream">Optional CUDA stream; when null, Stream.Null is substituted.</param>
public void GetBackgroundImage(
GpuMat backgroundImage, Stream stream = null)
{
// Guard against use after disposal, then validate the output buffer before
// the native call (which cannot tolerate null pointers).
if (disposed)
throw new ObjectDisposedException(GetType().Name);
if (backgroundImage == null)
throw new ArgumentNullException("backgroundImage");
// Substitute the shared null stream so the native call always gets a valid handle.
stream = stream ?? Stream.Null;
NativeMethods.gpu_MOG_GPU_getBackgroundImage(
ptr, backgroundImage.CvPtr, stream.CvPtr);
// Keep the managed wrappers alive until the native call returns, so the GC
// cannot collect them while their native pointers are still in use.
GC.KeepAlive(backgroundImage);
GC.KeepAlive(stream);
}
示例14: EnqueueConvert
/// <summary>
/// Converts matrix type, e.g. from float to uchar, depending on dtype
/// </summary>
/// <param name="src">Source GPU matrix; must not be null or disposed.</param>
/// <param name="dst">Destination GPU matrix receiving the converted data; must not be null or disposed.</param>
/// <param name="dtype">Destination type code forwarded to the native conversion.</param>
/// <param name="a">Conversion parameter forwarded to the native call (default 1).</param>
/// <param name="b">Conversion parameter forwarded to the native call (default 0).</param>
public void EnqueueConvert(GpuMat src, GpuMat dst, int dtype, double a = 1, double b = 0)
{
// Validate this stream and both matrices before touching native pointers.
ThrowIfDisposed();
if (src == null)
throw new ArgumentNullException("src");
if (dst == null)
throw new ArgumentNullException("dst");
src.ThrowIfDisposed();
dst.ThrowIfDisposed();
// Enqueue the conversion on this CUDA stream.
// NOTE(review): 'a'/'b' presumably follow the convertTo scale-and-offset
// convention (dst = src * a + b) — confirm against the native binding.
NativeMethods.cuda_Stream_enqueueConvert(ptr, src.CvPtr, dst.CvPtr, dtype, a, b);
}
示例15: Run
/// <summary>
///
/// </summary>
/// <param name="left"></param>
/// <param name="right"></param>
/// <param name="disparity"></param>
#else
/// <summary>
///
/// </summary>
/// <param name="left"></param>
/// <param name="right"></param>
/// <param name="disparity"></param>
#endif
public void Run(GpuMat left, GpuMat right, GpuMat disparity)
{
// Runs the GPU block-matching stereo correspondence for the given pair.
// Guard against use after disposal, then validate all inputs before the
// native call (which cannot tolerate null pointers).
if (disposed)
throw new ObjectDisposedException("StereoBM_GPU");
if(left == null)
throw new ArgumentNullException("left");
if(right == null)
throw new ArgumentNullException("right");
if (disparity == null)
throw new ArgumentNullException("disparity");
// NOTE(review): 'disparity' is the output buffer (per the parameter name) —
// confirm its expected depth/size against the native StereoBM_GPU docs.
NativeMethods.StereoBM_GPU_run1(ptr, left.CvPtr, right.CvPtr, disparity.CvPtr);
}