This article collects typical usage examples of the Emgu.CV.MemStorage class in C#. If you are wondering what MemStorage is, what it is used for, or how to call it, the selected examples below should help.
The MemStorage class belongs to the Emgu.CV namespace. Fifteen code examples are shown, ordered by popularity by default.
Example 1: FindRectangles
private void FindRectangles(Image<Gray, Byte> blackAndWhiteImage)
{
    m_FoundRectangles.Clear();
    using (MemStorage storage = new MemStorage()) // allocate storage for contour approximation
    {
        for (Contour<Point> contours = blackAndWhiteImage.FindContours(
                 Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                 Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST,
                 storage);
             contours != null;
             contours = contours.HNext)
        {
            Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.05, storage);
            //Debug.WriteLine(currentContour.Area);
            if (currentContour.Area > 250) // only consider contours with area greater than 250
            {
                if (currentContour.Total == 4) // the contour has 4 vertices
                {
                    if (IsRectangle(currentContour))
                    {
                        m_FoundRectangles.Add(currentContour.GetMinAreaRect());
                    }
                }
            }
        }
    }
}
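The IsRectangle helper referenced above is not part of this sample. A minimal sketch of the usual test, following the pattern in Emgu's shape-detection examples (every corner angle of the closed 4-vertex polygon must be close to 90 degrees; the +/-10 degree tolerance is an assumption):

private static bool IsRectangle(Contour<Point> contour)
{
    // build the closed polygon's edges and check each exterior angle
    Point[] pts = contour.ToArray();
    LineSegment2D[] edges = PointCollection.PolyLine(pts, true);
    for (int i = 0; i < edges.Length; i++)
    {
        double angle = Math.Abs(
            edges[(i + 1) % edges.Length].GetExteriorAngleDegree(edges[i]));
        if (angle < 80 || angle > 100)
            return false;
    }
    return true;
}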
Example 2: ContourCoordinates
public void ContourCoordinates()
{
    Image<Bgr, Byte> img = this.ShowCamera();
    Image<Gray, Byte> g_img = this.FilterImage(img);
    Image<Gray, Byte> r_img = new Image<Gray, Byte>(new Size(g_img.Width, g_img.Height));
    this.h = g_img.Height;
    this.w = g_img.Width;
    using (MemStorage storage = new MemStorage()) // allocate storage for contour approximation
    {
        for (Contour<Point> contours = g_img.FindContours(
                 Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE,
                 Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_CCOMP,
                 storage);
             contours != null;
             contours = contours.HNext)
        {
            Contour<Point> contour = contours.ApproxPoly(contours.Perimeter * 0.0005, storage);
            Point[] pts = contour.ToArray();
            LineSegment2D[] edges = PointCollection.PolyLine(pts, false);
            CvInvoke.cvDrawContours(r_img, contour, new MCvScalar(200), new MCvScalar(0, 200, 0), 5, -1, Emgu.CV.CvEnum.LINE_TYPE.FOUR_CONNECTED, new Point(0, 0));
            for (int k = 0; k < pts.Length; k++)
            {
                //r_img.Draw(new CircleF(pts[k], 2), new Gray(255), 1);
                this.showimg = r_img;
                //this.Coord2d.Add(pts[k]);
                List<Point> p = new List<Point>();
                p.Add(pts[k]);
                matrix.Add(p);
            }
        }
    }
}
Example 3: HOGDescriptor
/// <summary>
/// Create a new HOGDescriptor using the specific parameters
/// </summary>
public HOGDescriptor(
    Size winSize,
    Size blockSize,
    Size blockStride,
    Size cellSize,
    int nbins,
    int derivAperture,
    double winSigma,
    double L2HysThreshold,
    bool gammaCorrection)
{
    _ptr = CvHOGDescriptorCreate(
        ref winSize,
        ref blockSize,
        ref blockStride,
        ref cellSize,
        nbins,
        derivAperture,
        winSigma,
        0,
        L2HysThreshold,
        gammaCorrection);
    _rectStorage = new MemStorage();
    _rectSeq = new Seq<Rectangle>(_rectStorage);
}
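A construction sketch for this overload; the values below are the standard OpenCV pedestrian-detector geometry, not something taken from the sample above:

HOGDescriptor hog = new HOGDescriptor(
    new Size(64, 128), // winSize: the detection window
    new Size(16, 16),  // blockSize
    new Size(8, 8),    // blockStride
    new Size(8, 8),    // cellSize
    9,                 // nbins: histogram bins per cell
    1,                 // derivAperture
    -1,                // winSigma: -1 selects the default
    0.2,               // L2HysThreshold
    true);             // gammaCorrection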
Example 4: CreateChildMemStorage
/// <summary>
/// Creates a child memory storage that is similar to simple memory storage except for the differences in the
/// memory allocation/deallocation mechanism. When a child storage needs a new block to add to the block list,
/// it tries to get this block from the parent. The first unoccupied parent block available is taken and excluded
/// from the parent block list. If no blocks are available, the parent either allocates a block or borrows one
/// from its own parent, if any. In other words, a chain, or a more complex structure, of memory storages where
/// every storage is a child/parent of another is possible. When a child storage is released or even cleared,
/// it returns all blocks to the parent. In other aspects, the child storage is the same as the simple storage.
/// </summary>
/// <returns>Child MemStorage</returns>
public MemStorage CreateChildMemStorage()
{
    IntPtr childStoragePtr = CvInvoke.cvCreateChildMemStorage(_ptr);
    MemStorage childStorage = new MemStorage(childStoragePtr);
    //_childStorageList.Add(childStorage);
    return childStorage;
}
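A minimal parent/child sketch, assuming the Emgu CV 2.x Seq API; per the description above, blocks freed by the child are returned to the parent pool rather than to the system:

using (MemStorage parent = new MemStorage())
using (MemStorage child = parent.CreateChildMemStorage())
{
    // the sequence's blocks are drawn from (and returned to) the parent pool
    Seq<Point> seq = new Seq<Point>(child);
    seq.Push(new Point(1, 2));
} // disposing the child returns all of its blocks to the parent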
Example 5: SignDetector
public SignDetector(Image<Bgr, Byte> stopSignModel)
{
    _detector2 = new SURFDetector(500, false);
    using (Image<Gray, Byte> redMask = GetColorPixelMask(stopSignModel))
    {
        try
        {
            _tracker2 = new Features2DTracker<float>(_detector2.DetectFeatures(redMask, null));
        }
        catch { }
    }
    _octagonStorage2 = new MemStorage();
    _octagon2 = new Contour<Point>(_octagonStorage2);
    _octagon2.PushMulti(new Point[] {
        // hexagon
        new Point(1, 0),
        new Point(2, 0),
        new Point(3, 1),
        new Point(2, 2),
        new Point(1, 2),
        new Point(0, 1)},
        // octagon
        //new Point(1, 0),
        //new Point(2, 0),
        //new Point(3, 1),
        //new Point(3, 2),
        //new Point(2, 3),
        //new Point(1, 3),
        //new Point(0, 2),
        //new Point(0, 1)},
        Emgu.CV.CvEnum.BACK_OR_FRONT.FRONT);
}
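Downstream, a reference polygon like the one built above is typically compared against candidate contours with shape matching. A sketch, assuming the Emgu CV 2.x invoke layer (CvInvoke.cvMatchShapes with CONTOURS_MATCH_TYPE); candidate is a hypothetical Contour<Point> found in the scene, and the 0.1 threshold is an assumed tuning value:

// lower return values mean a closer shape match
double ratio = CvInvoke.cvMatchShapes(
    candidate, _octagon2,
    Emgu.CV.CvEnum.CONTOURS_MATCH_TYPE.CV_CONTOURS_MATCH_I3, 0);
bool looksLikeSign = ratio < 0.1;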
Example 6: Form1
public Form1()
{
    InitializeComponent();
    grabber = new Emgu.CV.Capture("C:/Users/L33549.CITI/Desktop/a.avi");
    grabber.QueryFrame();
    frameWidth = grabber.Width;
    frameHeight = grabber.Height;
    //detector = new AdaptiveSkinDetector(1, AdaptiveSkinDetector.MorphingMethod.NONE);
    hsv_min = new Hsv(0, 45, 0);
    hsv_max = new Hsv(20, 255, 255);
    YCrCb_min = new Ycc(0, 129, 40);
    YCrCb_max = new Ycc(255, 185, 135);
    box = new MCvBox2D();
    ellip = new Ellipse();
    contourStorage = new MemStorage();
    approxStorage = new MemStorage();
    hullStorage = new MemStorage();
    defectsStorage = new MemStorage();
    tipPts = new Point[MAX_POINTS];  // coords of the finger tips
    foldPts = new Point[MAX_POINTS]; // coords of the skin folds between fingers
    depths = new float[MAX_POINTS];  // distances from tips to folds
    cogPt = new Point();
    fingerTips = new List<Point>();
    face = new CascadeClassifier("C:/Users/L33549.CITI/Desktop/AbuseAnalysis/HandGestureRecognition/HandGestureRecognition/HandGestureRecognition/haar/Original/haarcascade_hand.xml");
    Application.Idle += new EventHandler(FrameGrabber);

    /*foreach (var potentialSensor in KinectSensor.KinectSensors)
    {
        if (potentialSensor.Status == KinectStatus.Connected)
        {
            this.sensor = potentialSensor;
            break;
        }
    }
    if (null != this.sensor)
    {
        // Turn on the color stream to receive color frames
        this.sensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);
        // Allocate space to put the pixels we'll receive
        this.colorPixels = new byte[this.sensor.ColorStream.FramePixelDataLength];
        // This is the bitmap we'll display on-screen
        this.colorBitmap = new WriteableBitmap(this.sensor.ColorStream.FrameWidth, this.sensor.ColorStream.FrameHeight, 96.0, 96.0, PixelFormats.Bgr32, null);
        // Set the image we display to point to the bitmap where we'll put the image data
        //this.Image.Source = this.colorBitmap;
        // Add an event handler to be called whenever there is new color frame data
        this.sensor.ColorFrameReady += this.SensorColorFrameReady;
        // Start the sensor!
        this.sensor.Start();
    }*/
}
Example 7: button1_Click
private void button1_Click(object sender, EventArgs e)
{
    OpenFileDialog Openfile = new OpenFileDialog();
    if (Openfile.ShowDialog() == DialogResult.OK)
    {
        Image<Bgr, byte> My_Image = new Image<Bgr, byte>(Openfile.FileName);
        Image<Gray, byte> gray_image = My_Image.Convert<Gray, byte>();
        Image<Gray, byte> eh_gray_image = My_Image.Convert<Gray, byte>();
        Image<Gray, byte> smooth_gray_image = My_Image.Convert<Gray, byte>();
        Image<Gray, byte> ed_gray_image = new Image<Gray, byte>(gray_image.Size);
        Image<Bgr, byte> final_image = new Image<Bgr, byte>(Openfile.FileName);
        MemStorage stor = new MemStorage();
        List<MCvBox2D> detectedLicensePlateRegionList = new List<MCvBox2D>();
        CvInvoke.cvEqualizeHist(gray_image, eh_gray_image);
        CvInvoke.cvSmooth(eh_gray_image, smooth_gray_image, Emgu.CV.CvEnum.SMOOTH_TYPE.CV_GAUSSIAN, 3, 3, 0, 0);
        //CvInvoke.cvAdaptiveThreshold(smooth_gray_image, bi_gray_image, 255, Emgu.CV.CvEnum.ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_GAUSSIAN_C, Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY, 71, 1);
        CvInvoke.cvCanny(smooth_gray_image, ed_gray_image, 100, 50, 3);
        Contour<Point> contours = ed_gray_image.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE, stor);
        DetectPlate(contours, detectedLicensePlateRegionList);
        for (int i = 0; i < detectedLicensePlateRegionList.Count; i++)
        {
            final_image.Draw(detectedLicensePlateRegionList[i], new Bgr(Color.Red), 2);
        }
        imageBox1.Image = My_Image;
        imageBox2.Image = gray_image;
        imageBox3.Image = eh_gray_image;
        imageBox4.Image = smooth_gray_image;
        imageBox5.Image = ed_gray_image;
        imageBox6.Image = final_image;
    }
}
Example 8: DetectLicensePlate
/// <summary>
/// Detect license plates from the given image
/// </summary>
/// <param name="img">The image to search for license plates</param>
/// <param name="licensePlateImagesList">A list of images where the detected license plate regions are stored</param>
/// <param name="filteredLicensePlateImagesList">A list of images where the detected license plate regions (with noise removed) are stored</param>
/// <param name="detectedLicensePlateRegionList">A list where the regions of license plates (each defined by an MCvBox2D) are stored</param>
/// <returns>The list of words for each license plate</returns>
public List<List<Word>> DetectLicensePlate(
    Image<Bgr, byte> img,
    List<Image<Gray, Byte>> licensePlateImagesList,
    List<Image<Gray, Byte>> filteredLicensePlateImagesList,
    List<MCvBox2D> detectedLicensePlateRegionList)
{
    List<List<Word>> licenses = new List<List<Word>>();
    // Convert the image to grayscale
    using (Image<Gray, byte> gray = img.Convert<Gray, Byte>())
    // Create an image to hold the Canny edges
    using (Image<Gray, Byte> canny = new Image<Gray, byte>(gray.Size))
    // Create the MemStorage used by FindContours
    using (MemStorage stor = new MemStorage())
    {
        // Run the Canny edge detector on the gray image
        CvInvoke.cvCanny(gray, canny, 130, 70, 3);
        // Find all contours in the edge image
        Contour<Point> contours = canny.FindContours(
            Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
            Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE,
            stor);
        // Examine the contours for plate candidates
        FindLicensePlate(contours, gray, canny, licensePlateImagesList, filteredLicensePlateImagesList, detectedLicensePlateRegionList, licenses);
    }
    return licenses;
}
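A calling sketch for the method above; the type name, instance name, and image path are hypothetical, and Word comes from the OCR library the surrounding sample uses:

LicensePlateDetector detector = new LicensePlateDetector(); // hypothetical type name
List<Image<Gray, byte>> plateImages = new List<Image<Gray, byte>>();
List<Image<Gray, byte>> filteredPlateImages = new List<Image<Gray, byte>>();
List<MCvBox2D> plateRegions = new List<MCvBox2D>();
using (Image<Bgr, byte> frame = new Image<Bgr, byte>("car.jpg"))
{
    List<List<Word>> words = detector.DetectLicensePlate(
        frame, plateImages, filteredPlateImages, plateRegions);
}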
Example 9: HOGDescriptor
/// <summary>
/// Create a new HOGDescriptor
/// </summary>
public HOGDescriptor()
{
    _ptr = CvHOGDescriptorCreateDefault();
    _rectStorage = new MemStorage();
    _rectSeq = new Seq<Rectangle>(_rectStorage);
    _vector = new VectorOfFloat();
}
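A minimal pedestrian-detection sketch using the default descriptor, assuming the Emgu CV 2.x API surface (SetSVMDetector, GetDefaultPeopleDetector, DetectMultiScale) and a hypothetical image file:

using (HOGDescriptor hog = new HOGDescriptor())
using (Image<Bgr, byte> image = new Image<Bgr, byte>("pedestrians.jpg"))
{
    // load the built-in linear SVM trained for people detection
    hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
    Rectangle[] people = hog.DetectMultiScale(image);
    foreach (Rectangle r in people)
        image.Draw(r, new Bgr(Color.Red), 2);
}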
Example 10: ArrowSignDetector
//public Bitmap _grayImg;
//public Bitmap _canndyImg;
public ArrowSignDetector()
{
    _stor = new MemStorage();
    _defaultStor = new MemStorage();
    _tempStor = new MemStorage();
    //_defaultContour = FindDefault();
}
Example 11: FilterPlate
private static Image<Gray, Byte> FilterPlate(Image<Gray, Byte> plate)
{
    Image<Gray, Byte> thresh = plate.ThresholdBinaryInv(new Gray(120), new Gray(255));
    Image<Gray, Byte> plateMask = new Image<Gray, byte>(plate.Size);
    Image<Gray, Byte> plateCanny = plate.Canny(new Gray(100), new Gray(50));
    using (MemStorage stor = new MemStorage())
    {
        plateMask.SetValue(255.0);
        for (Contour<Point> contours = plateCanny.FindContours(
                 Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                 Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL,
                 stor);
             contours != null; contours = contours.HNext)
        {
            Rectangle rect = contours.BoundingRectangle;
            if (rect.Height > (plate.Height >> 1)) // only mask regions at least half the plate height
            {
                rect.X -= 1; rect.Y -= 1; rect.Width += 2; rect.Height += 2;
                rect.Intersect(plate.ROI);
                plateMask.Draw(rect, new Gray(0.0), -1);
            }
        }
        thresh.SetValue(0, plateMask);
    }
    thresh._Erode(1);
    thresh._Dilate(1);
    return thresh;
}
Example 12: HandDetector
public HandDetector(string hsvFnm, int width, int height)
{
    Size scale = new Size(width / IMG_SCALE, height / IMG_SCALE);
    scaleImg = new Image<Bgr, Byte>(scale);
    hsvImg = new Image<Hsv, Byte>(scale);
    imgThreshed = new Image<Gray, Byte>(scale);
    // storage for contour, hull, and defect calculations by OpenCV
    contourStorage = new MemStorage();
    approxStorage = new MemStorage();
    hullStorage = new MemStorage();
    defectsStorage = new MemStorage();
    msgFont = new Font("SansSerif", 18, FontStyle.Bold, GraphicsUnit.Pixel);
    cogPt = new Point();
    fingerTips = new List<Point>();
    namedFingers = new List<FingerNameClass.FingerName>();
    tipPts = new Point[MAX_POINTS];  // coords of the finger tips
    foldPts = new Point[MAX_POINTS]; // coords of the skin folds between fingers
    depths = new float[MAX_POINTS];  // distances from tips to folds
    hueLower = 0;
    hueUpper = 20;
    satLower = 50;
    satUpper = 255;
    briLower = 0;
    briUpper = 255;
}
Example 13: ProcessFrame
private void ProcessFrame(object sender, EventArgs e)
{
    using (MemStorage storage = new MemStorage()) // create storage for motion components
    {
        Image<Bgr, Byte> image = _capture.QuerySmallFrame().PyrUp(); // reduce noise from the image
        capturedImageBox.Image = image;

        // update the motion history
        _motionHistory.Update(image.Convert<Gray, Byte>());

        #region get a copy of the motion mask and enhance its color
        Image<Gray, Byte> motionMask = _motionHistory.Mask;
        double[] minValues, maxValues;
        System.Drawing.Point[] minLoc, maxLoc;
        motionMask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
        motionMask._Mul(255.0 / maxValues[0]);
        #endregion

        // create the motion image
        Image<Bgr, Byte> motionImage = new Image<Bgr, byte>(motionMask.Size);
        // display the motion pixels in blue (first channel)
        motionImage[0] = motionMask;

        // threshold that defines a motion area; reduce the value to detect smaller motions
        double minArea = 100;

        storage.Clear(); // clear the storage so it can be reused for this frame
        Seq<MCvConnectedComp> motionComponents = _motionHistory.GetMotionComponents(storage);

        // iterate through each of the motion components
        foreach (MCvConnectedComp comp in motionComponents)
        {
            // reject components that have a small area
            if (comp.area < minArea) continue;

            // find the angle and motion pixel count of the specific area
            double angle, motionPixelCount;
            _motionHistory.MotionInfo(comp.rect, out angle, out motionPixelCount);

            // reject areas that contain too little motion
            if (motionPixelCount < comp.area * 0.05) continue;

            // draw each individual motion in red
            DrawMotion(motionImage, comp.rect, angle, new Bgr(Color.Red));
        }

        // find and draw the overall motion angle
        double overallAngle, overallMotionPixelCount;
        _motionHistory.MotionInfo(motionMask.ROI, out overallAngle, out overallMotionPixelCount);
        DrawMotion(motionImage, motionMask.ROI, overallAngle, new Bgr(Color.Green));

        // display the number of motions found in the current image
        UpdateText(String.Format("Total Motions found: {0}; Motion Pixel count: {1}", motionComponents.Total, overallMotionPixelCount));

        // display the motion image
        motionImageBox.Image = motionImage;
    }
}
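For context, _motionHistory above is an Emgu.CV.MotionHistory. A construction sketch, assuming the three-argument constructor used by the Emgu 2.x motion-detection sample; all values are in seconds and are illustrative tuning assumptions:

MotionHistory _motionHistory = new MotionHistory(
    1.0,   // duration of motion history to keep
    0.05,  // max time delta for the motion gradient
    0.5);  // min time delta for the motion gradient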
Example 14: GetModelPoints
/// <summary>
/// Get the model points stored in this detector
/// </summary>
/// <returns>The model points stored in this detector</returns>
public MKeyPoint[] GetModelPoints()
{
    using (MemStorage stor = new MemStorage())
    {
        Seq<MKeyPoint> modelPoints = new Seq<MKeyPoint>(stor);
        CvPlanarObjectDetectorGetModelPoints(_ptr, modelPoints);
        return modelPoints.ToArray();
    }
}
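A usage sketch, assuming detector is an initialized Emgu.CV.Features2D.PlanarObjectDetector:

MKeyPoint[] modelPoints = detector.GetModelPoints();
foreach (MKeyPoint kp in modelPoints)
    Console.WriteLine("({0}, {1}) size={2}", kp.Point.X, kp.Point.Y, kp.Size);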
Example 15: Detect
/// <summary>
/// Detect the planar object from the specified image
/// </summary>
/// <param name="image">The image in which the planar object will be detected</param>
/// <param name="h">The homography matrix which will be updated</param>
/// <returns>The four corners of the detected region</returns>
public PointF[] Detect(Image<Gray, Byte> image, HomographyMatrix h)
{
    using (MemStorage stor = new MemStorage())
    {
        Seq<PointF> corners = new Seq<PointF>(stor);
        CvPlanarObjectDetectorDetect(_ptr, image, h, corners);
        return corners.ToArray();
    }
}
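A usage sketch, assuming detector has already been trained on a model image and that HomographyMatrix exposes a parameterless constructor (as in Emgu CV 2.x); the file name is hypothetical:

using (Image<Gray, byte> observed = new Image<Gray, byte>("scene.png"))
{
    HomographyMatrix homography = new HomographyMatrix();
    PointF[] corners = detector.Detect(observed, homography);
    // corners traces the outline of the detected planar object; fewer than
    // four points means no confident detection was made
}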