This article collects typical usage examples of the Emgu classes in C#. If you are wondering what Emgu is for in C#, how to use it, or are looking for concrete examples, the selected class code examples below may help.
The following shows 15 Emgu code examples, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps recommend better C# code examples.
Example 1: PreQuantization
protected override void PreQuantization(Emgu.CV.Image<Color, byte> image)
{
    this.image = image;

    // If there is only one palette, there is nothing to choose.
    if (this.palettes.Length == 1) {
        base.PreQuantization(image);
        return;
    }

    // Extract a palette from the image, removing transparent colors (not approximated).
    this.compareQuantization.TileSize = this.TileSize;
    this.compareQuantization.Quantizate(image);
    Color[] comparePalette = this.compareQuantization.Palette.
        Where(c => c.Alpha == 255).ToArray();
    LabColor[] compareLabPalette = ColorConversion.ToLabPalette<Color>(comparePalette);

    // Compare against every candidate palette and keep the most similar one.
    double minDistance = Double.MaxValue;
    for (int i = 0; i < palettes.Length && minDistance > 0; i++) {
        double distance = PaletteDistance.CalculateDistance(
            compareLabPalette, this.labPalettes[i]);
        if (distance < minDistance) {
            this.SelectedPalette = i;
            minDistance = distance;
        }
    }

    // Set the palette...
    this.Palette = this.palettes[this.SelectedPalette];

    // ... and run the FixedPaletteQuantization
    base.PreQuantization(image);
}
Example 2: CalibrationError
public static void CalibrationError(
    Emgu.CV.ExtrinsicCameraParameters ecp,
    Emgu.CV.IntrinsicCameraParameters icp,
    System.Drawing.PointF[] image_points,
    Vector[] reference_points,
    out double[] deviations,
    out Vector[] isect_points)
{
    // Shoot rays through image points,
    // intersect with plane defined by extrinsic
    // and measure distance to reference points
    Matrix inv_ecp = Matrix.Identity(4, 4);
    inv_ecp.SetMatrix(0, 2, 0, 3, ecp.ExtrinsicMatrix.ToParsley());
    inv_ecp = inv_ecp.Inverse();

    Ray[] rays = Ray.EyeRays(icp, image_points);
    Plane p = new Plane(ecp);
    isect_points = new Vector[rays.Length];
    deviations = new double[rays.Length];

    for (int i = 0; i < rays.Length; ++i) {
        double t;
        Intersection.RayPlane(rays[i], p, out t);
        Vector isect = rays[i].At(t);
        Vector x = new Vector(new double[] { isect[0], isect[1], isect[2], 1 });
        x = (inv_ecp * x.ToColumnMatrix()).GetColumnVector(0);
        Vector final = new Vector(new double[] { x[0], x[1], x[2] });
        isect_points[i] = final;
        deviations[i] = (final - reference_points[i]).Norm();
    }
}
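A minimal usage sketch for the routine above, assuming ecp/icp come from a prior calibration step and that imagePoints/referencePoints hold the detected pattern corners and their known world coordinates (the variable names are illustrative, not from the original project):

double[] deviations;
Vector[] isectPoints;
CalibrationError(ecp, icp, imagePoints, referencePoints, out deviations, out isectPoints);

// Summarize the per-point distances between the ray/plane intersections
// and the reference points (requires System.Linq).
double worst = deviations.Max();
double rms = Math.Sqrt(deviations.Average(d => d * d));
Console.WriteLine("max error: {0:F3}, rms error: {1:F3}", worst, rms);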
Example 3: ExtractBriefFeatureDescriptors
public static Matrix<byte> ExtractBriefFeatureDescriptors(Emgu.CV.Image<Gray, byte> im, MKeyPoint kp)
{
    var f = new VectorOfKeyPoint();
    f.Push(new MKeyPoint[] { kp });
    // Invoked with a single point because otherwise we cannot tell which points failed to get descriptors.
    return new BriefDescriptorExtractor().ComputeDescriptorsRaw(im, (Emgu.CV.Image<Gray, byte>)null, f);
}
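A sketch of how this one-keypoint-at-a-time helper might be used to keep only keypoints that actually produce a BRIEF descriptor; it assumes ComputeDescriptorsRaw yields an empty or null matrix on failure, which should be verified against your Emgu version:

var described = new List<MKeyPoint>();
var descriptors = new List<Matrix<byte>>();
foreach (MKeyPoint kp in keyPoints) {
    Matrix<byte> d = ExtractBriefFeatureDescriptors(grayImage, kp);
    if (d != null && d.Rows == 1) {   // exactly one row: the descriptor was computed
        described.Add(kp);
        descriptors.Add(d);
    }
}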
Example 4: FastFeatureExtRaw
public static IEnumerable<MKeyPoint> FastFeatureExtRaw(Emgu.CV.Image<Gray, byte> image, FeatureExtractionOptions options)
{
    return new FastDetector(options.threshold, true)
        .DetectKeyPointsRaw(image, (Emgu.CV.Image<Gray, byte>)null).ToArray()
        .OrderByDescending(kp => kp.Response)
        .Take(options.numPoints);
}
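A possible call site, assuming FeatureExtractionOptions is the project's own options type with settable threshold and numPoints members:

var options = new FeatureExtractionOptions { threshold = 20, numPoints = 200 };
Image<Gray, byte> gray = frame.Convert<Gray, byte>();

// Mark the strongest FAST responses on the original frame.
foreach (MKeyPoint kp in FastFeatureExtRaw(gray, options)) {
    frame.Draw(new CircleF(kp.Point, 3f), new Bgr(0, 0, 255), 1);
}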
Example 5: ProcessImage
public virtual void ProcessImage(Emgu.CV.Image<Emgu.CV.Structure.Bgr, byte> image) {
    Emgu.CV.Image<Gray, byte> gray = image.Convert<Gray, byte>();
    gray._ThresholdBinary(new Gray(_threshold), new Gray(255.0));
    gray._Not();

    Parsley.Core.EllipseDetector ed = new Parsley.Core.EllipseDetector();
    ed.MinimumContourCount = _min_contour_count;

    List<Parsley.Core.DetectedEllipse> ellipses =
        new List<Parsley.Core.DetectedEllipse>(ed.DetectEllipses(gray));

    List<Parsley.Core.DetectedEllipse> finals =
        new List<Parsley.Core.DetectedEllipse>(
            ellipses.Where(e => e.Rating < _distance_threshold));

    // Sort by squared distance of the ellipse center from the image origin.
    finals.Sort(
        (a, b) => {
            double dista = a.Ellipse.MCvBox2D.center.X * a.Ellipse.MCvBox2D.center.X + a.Ellipse.MCvBox2D.center.Y * a.Ellipse.MCvBox2D.center.Y;
            double distb = b.Ellipse.MCvBox2D.center.X * b.Ellipse.MCvBox2D.center.X + b.Ellipse.MCvBox2D.center.Y * b.Ellipse.MCvBox2D.center.Y;
            return dista.CompareTo(distb);
        });

    Bgr bgr = new Bgr(0, 255, 0);
    MCvFont f = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_PLAIN, 0.8, 0.8);
    int count = 1;
    foreach (Parsley.Core.DetectedEllipse e in finals) {
        image.Draw(e.Ellipse, bgr, 2);
        image.Draw(count.ToString(), ref f, new System.Drawing.Point((int)e.Ellipse.MCvBox2D.center.X, (int)e.Ellipse.MCvBox2D.center.Y), bgr);
        count++;
    }
}
Example 6: ExtractPoints
private List<System.Drawing.PointF> ExtractPoints(Emgu.CV.Image<Gray, byte> channel) {
    int[] max_intensities = new int[channel.Width];
    float[] range = new float[channel.Width]; // Note that the default float is zero.

    // Search row by row for the brightest pixel in each column.
    byte[] d = channel.Bytes;
    int stride = d.Length / channel.Height;
    int h = channel.Height; // Caching these in locals instead of hitting the
    int w = channel.Width;  // properties inside the loop is a huge timesaver.
    unchecked {
        for (int r = 0; r < h; ++r) {
            int offset = stride * r;
            for (int c = 0; c < w; ++c) {
                byte i = d[offset + c];
                if (i > max_intensities[c]) {
                    max_intensities[c] = i;
                    range[c] = r;
                }
            }
        }
    }

    // Build the output: keep only columns whose peak intensity reaches the threshold.
    List<System.Drawing.PointF> pixels = new List<System.Drawing.PointF>();
    for (int i = 0; i < w; ++i) {
        if (max_intensities[i] >= _threshold) {
            pixels.Add(new System.Drawing.PointF(i, range[i]));
        }
    }
    return pixels;
}
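A sketch of a plausible call site from inside the same class: for a red laser line, the per-column search would typically run on the red channel of the frame (the channel index and the drawing step are assumptions, not part of the original code):

// Channel 2 of a Bgr image is the red channel in Emgu CV.
using (Image<Gray, byte> red = frame[2]) {
    List<System.Drawing.PointF> laserPixels = ExtractPoints(red);
    foreach (System.Drawing.PointF p in laserPixels) {
        frame.Draw(new CircleF(p, 1f), new Bgr(0, 0, 255), 1);
    }
}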
Example 7: addImage
public void addImage( string imageId, string imageType, Emgu.CV.Image<Emgu.CV.Structure.Gray, byte> image )
{
    ensureImageBoxNamesUpdated( imageType );

    ImageBox ib = new ImageBox();
    ib.Image = image;
    ib.Width = 100;
    ib.Height = 100;
    ib.SizeMode = PictureBoxSizeMode.Zoom;
    ib.HorizontalScrollBar.Visible = false;
    ib.VerticalScrollBar.Visible = false;

    if ( _imageBoxNames[ 0 ].CompareTo( imageType ) == 0 )
    {
        flowPanel1.Controls.Add( ib );
        return;
    }
    if ( _imageBoxNames[ 1 ].CompareTo( imageType ) == 0 )
    {
        flowPanel2.Controls.Add( ib );
        return;
    }
    if ( _imageBoxNames[ 2 ].CompareTo( imageType ) == 0 )
    {
        flowPanel3.Controls.Add( ib );
    }
}
Example 8: ProcessImage
public void ProcessImage(Emgu.CV.Image<Emgu.CV.Structure.Bgr, byte> image) {
    MCvBox2D mybox = new MCvBox2D(new System.Drawing.PointF(100, 100), new System.Drawing.Size(50, 30), 110);
    MCvBox2D mybox2 = new MCvBox2D(new System.Drawing.PointF(100, 100), new System.Drawing.Size(50, 30), 0);

    image.Draw(new Ellipse(mybox), new Bgr(0, 0, 255), 2);
    image.Draw(new Ellipse(mybox2), new Bgr(0, 255, 0), 2);
}
Example 9: ConvertCVImageToPixbuf
public static Pixbuf ConvertCVImageToPixbuf(Emgu.CV.Image<Bgr, byte> img)
{
    System.Drawing.Bitmap bmp = img.Bitmap;
    MemoryStream ms = new MemoryStream();
    bmp.Save(ms, System.Drawing.Imaging.ImageFormat.Bmp);
    return new Pixbuf(ms.GetBuffer());
}
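A variant of the same conversion, sketched with using blocks and ToArray(): ToArray() copies exactly the bytes written, whereas GetBuffer() can return a larger internal buffer. Treat this as a suggestion rather than the original author's code:

public static Pixbuf ConvertCVImageToPixbufSafe(Emgu.CV.Image<Bgr, byte> img)
{
    using (MemoryStream ms = new MemoryStream())
    {
        // img.Bitmap shares its pixel data with the Emgu image, so it is not disposed here.
        img.Bitmap.Save(ms, System.Drawing.Imaging.ImageFormat.Bmp);
        return new Pixbuf(ms.ToArray());
    }
}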
Example 10: OnFrame
protected override void OnFrame(Parsley.Core.BuildingBlocks.FrameGrabber fp, Emgu.CV.Image<Emgu.CV.Structure.Bgr, byte> img) {
    /*
    if (_take_ref_image) {
        _ref_image = img.Copy();
        _take_ref_image = false;
    }

    // 1. Extract laser-line
    Context.Setup.World.Laser.FindLaserLine(img);
    PointF[] laser_points = Context.Setup.World.Laser.ValidLaserPoints.ToArray();

    if (_acc != null) {
        img.Draw(_acc.ROI, new Bgr(Color.Green), 1);
    }

    if (laser_points.Length < 3 || _ref_image == null || _acc == null) {
        return;
    }

    Core.Ray[] eye_rays = Core.Ray.EyeRays(Context.Setup.World.Camera.Intrinsics, laser_points);
    Core.Plane laser_plane;
    if (Context.Setup.World.Laser.LaserPlaneAlgorithm.FindLaserPlane(
        eye_rays, Context.Setup.World.ReferencePlanes, out laser_plane))
    {
        Vector z = Vector.Create(new double[] { 0, 0, 1 });
        if (Math.Abs(laser_plane.Normal.ScalarMultiply(z)) < 0.3) {
            Console.WriteLine(laser_plane.Normal);
            return;
        }

        lock (Context.Viewer) {
            for (int i = 0; i < laser_points.Length; ++i) {
                Point lp = new Point((int)laser_points[i].X, (int)laser_points[i].Y);
                if (_acc.ROI.Contains(lp)) {
                    double t;
                    Core.Intersection.RayPlane(eye_rays[i], laser_plane, out t);
                    img[lp.Y, lp.X] = new Bgr(Color.Red);

                    Bgr bgr = _ref_image[lp.Y, lp.X];
                    Vector color = new Vector(new double[] { bgr.Red / 255.0, bgr.Green / 255.0, bgr.Blue / 255.0, 1.0 });
                    //_pointcloud.AddPoint(final.ToInterop(), color.ToInterop());

                    Point p_in_roi = _acc.MakeRelativeToROI(lp);
                    bool first;
                    _acc.Accumulate(p_in_roi, eye_rays[i], t, out first);
                    if (first) {
                        _acc.SetId(p_in_roi, _pointcloud.AddPoint(_acc.Extract(p_in_roi).ToInterop(), color.ToInterop()));
                    } else {
                        _pointcloud.UpdatePoint(_acc.GetId(p_in_roi), _acc.Extract(p_in_roi).ToInterop(), color.ToInterop());
                    }
                }
            }
        }
    }
    */
}
Example 11: OnFrame
protected override void OnFrame(Parsley.Core.BuildingBlocks.FrameGrabber fp, Emgu.CV.Image<Emgu.CV.Structure.Bgr, byte> img)
{
    // Constraint checking
    if (!Context.Setup.Camera.HasIntrinsics)
    {
        _on_roi = false;
        return;
    }

    if (_interactor.State == Parsley.UI.InteractionState.Interacting)
    {
        _interactor.DrawIndicator(_interactor.Current, img);
    }
    else
    {
        _interactor.DrawIndicator(_r, img);
    }

    if (_on_roi && _pattern != null)
    {
        Image<Gray, Byte> gray = img.Convert<Gray, Byte>();
        _pattern.IntrinsicParameters = Context.Setup.Camera.Intrinsics;
        try
        {
            _pattern.FindPattern(gray, _r);
            if (_pattern.PatternFound)
            {
                Parsley.Core.ExtrinsicCalibration ec = new Parsley.Core.ExtrinsicCalibration(_pattern.ObjectPoints, Context.Setup.Camera.Intrinsics);
                ExtrinsicCameraParameters ecp = ec.Calibrate(_pattern.ImagePoints);

                double[] deviations;
                Vector[] points;
                Core.ExtrinsicCalibration.CalibrationError(
                    ecp, Context.Setup.Camera.Intrinsics, _pattern.ImagePoints,
                    _pattern.ObjectPoints, out deviations, out points);

                double max_error = deviations.Max();
                if (max_error < _last_error)
                {
                    _last_detected_plane = ecp;
                    _last_error = max_error;
                    this.Logger.Info(String.Format("Extrinsics successfully calculated. Maximum error {0:F3}", _last_error));
                }
            }
            else if (!_pattern.PatternFound && _last_detected_plane == null)
            {
                this.Logger.Warn("Pattern not found.");
            }
        }
        catch (System.Exception e)
        {
            this.Logger.Warn(String.Format("Failed to determine extrinsic calibration: {0}", e.Message));
        }
    }

    if (_last_detected_plane != null)
    {
        Core.Drawing.DrawCoordinateFrame(img, _last_detected_plane, Context.Setup.Camera.Intrinsics);
    }
}
Example 12: DetectedEllipse
/// <summary>
/// Construct from values.
/// </summary>
/// <param name="contour">Contour from which the ellipse was detected.</param>
/// <param name="ellipse">The fitted ellipse.</param>
/// <param name="rating">Rating of the detection.</param>
public DetectedEllipse(
    Emgu.CV.Contour<System.Drawing.Point> contour,
    Emgu.CV.Structure.Ellipse ellipse,
    double rating) {
    _contour = contour;
    _ellipse = ellipse;
    _rating = rating;
}
Example 13: HoughLineTransform
/// <summary>
/// Hough Line Transform, as in OpenCV (EmguCV does not wrap this function as it should).
/// </summary>
/// <param name="img">Binary image</param>
/// <param name="type">Type of Hough transform</param>
/// <param name="threshold">How many votes are needed to accept a line</param>
/// <returns>Lines in theta/rho format</returns>
public static PointF[] HoughLineTransform(Image<Gray, byte> img, Emgu.CV.CvEnum.HOUGH_TYPE type, int threshold)
{
    using (MemStorage stor = new MemStorage())
    {
        IntPtr linePtr = CvInvoke.cvHoughLines2(img, stor.Ptr, type, 5, Math.PI / 180 * 15, threshold, 0, 0);
        Seq<PointF> seq = new Seq<PointF>(linePtr, stor);
        return seq.ToArray();
    }
}
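A sketch of turning the returned lines into drawable segments. With CV_HOUGH_STANDARD, OpenCV stores each line as (rho, theta); the summary above says "theta/rho format", so verify the ordering against your data before relying on it. Note also that the rho/theta resolutions (5 px, 15 degrees) are hard-coded inside the wrapper.

PointF[] lines = HoughLineTransform(edges, Emgu.CV.CvEnum.HOUGH_TYPE.CV_HOUGH_STANDARD, 50);
foreach (PointF line in lines) {
    double rho = line.X, theta = line.Y;   // assumed ordering, see note above
    double a = Math.Cos(theta), b = Math.Sin(theta);
    double x0 = a * rho, y0 = b * rho;
    var p1 = new System.Drawing.Point((int)(x0 + 1000 * (-b)), (int)(y0 + 1000 * a));
    var p2 = new System.Drawing.Point((int)(x0 - 1000 * (-b)), (int)(y0 - 1000 * a));
    image.Draw(new LineSegment2D(p1, p2), new Bgr(0, 0, 255), 1);
}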
Example 14: ExtrinsicCalibration
public ExtrinsicCalibration(Vector[] object_points, Emgu.CV.IntrinsicCameraParameters intrinsics)
    : base(object_points)
{
    _intrinsics = intrinsics;

    // Since object points remain constant, we can apply their conversion right here.
    _converted_object_points = Array.ConvertAll<Vector, MCvPoint3D32f>(
        this.ObjectPoints,
        new Converter<Vector, MCvPoint3D32f>(Extensions.ConvertFromParsley.ToEmguF));
}
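A usage sketch that mirrors how this class is exercised in Example 11: construct it once from the pattern's object points, calibrate against the detected image points, and optionally draw the resulting coordinate frame (variable names and the error threshold are illustrative):

var ec = new ExtrinsicCalibration(pattern.ObjectPoints, camera.Intrinsics);
ExtrinsicCameraParameters ecp = ec.Calibrate(pattern.ImagePoints);

double[] deviations;
Vector[] isectPoints;
ExtrinsicCalibration.CalibrationError(
    ecp, camera.Intrinsics, pattern.ImagePoints, pattern.ObjectPoints,
    out deviations, out isectPoints);

if (deviations.Max() < maxAcceptableError) {
    Core.Drawing.DrawCoordinateFrame(img, ecp, camera.Intrinsics);
}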
Example 15: SURFEngine
public SURFEngine(Emgu.CV.Image<Gray, byte> roi)
{
    surfDetector = new SURFDetector(500, false);

    itemImage = roi;
    itemKP = surfDetector.DetectKeyPointsRaw(itemImage, null);
    itemDescriptors = surfDetector.ComputeDescriptorsRaw(itemImage, null, itemKP);

    matcher = new BruteForceMatcher<float>(DistanceType.L2);
    matcher.Add(itemDescriptors);
}
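A sketch of how the prepared matcher might be queried with a scene image, following the common Emgu 2.x SURF matching pattern; the k value, the uniqueness threshold, and the accessibility of surfDetector/matcher outside the constructor are assumptions:

// Detect and describe SURF features in the scene the same way as for the item image.
VectorOfKeyPoint sceneKP = surfDetector.DetectKeyPointsRaw(sceneImage, null);
Matrix<float> sceneDescriptors = surfDetector.ComputeDescriptorsRaw(sceneImage, null, sceneKP);

int k = 2;
var indices = new Matrix<int>(sceneDescriptors.Rows, k);
var mask = new Matrix<byte>(sceneDescriptors.Rows, 1);
mask.SetValue(255);
using (var dist = new Matrix<float>(sceneDescriptors.Rows, k))
{
    // For every scene descriptor, find the k nearest item descriptors.
    matcher.KnnMatch(sceneDescriptors, indices, dist, k, null);
    // Keep only matches whose best distance is clearly better than the second best.
    Features2DToolbox.VoteForUniqueness(dist, 0.8, mask);
}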