This article collects typical usage examples of the C# method Image.DetectHaarCascade. If you have been wondering how to use Image.DetectHaarCascade in C#, what it does, or what calling it looks like in practice, the curated examples below should help. You can also explore further usage examples of its containing class, Image.
The following presents 15 code examples of Image.DetectHaarCascade, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code examples.
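Before the examples, here is a minimal, self-contained sketch of a typical call, distilled from the examples on this page. The cascade and image file names are placeholders; the parameters (scale factor 1.2, 10 minimum neighbors, Canny pruning, 20x20 minimum window) mirror the most common values below and use the legacy Emgu CV 2.x API seen throughout this page.

using System.Drawing;
using Emgu.CV;
using Emgu.CV.Structure;

class HaarDemo
{
    static void Main()
    {
        // Load a frontal-face cascade (the file path is a placeholder).
        HaarCascade face = new HaarCascade("haarcascade_frontalface_default.xml");
        // Read an image and convert it to grayscale for detection.
        Image<Bgr, byte> frame = new Image<Bgr, byte>("input.jpg");
        Image<Gray, byte> gray = frame.Convert<Gray, byte>();
        // Scale factor 1.2, 10 minimum neighbors, Canny pruning, 20x20 minimum window.
        MCvAvgComp[][] faces = gray.DetectHaarCascade(
            face, 1.2, 10,
            Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
            new Size(20, 20));
        // Index [0] holds the detections for a single-channel image.
        foreach (MCvAvgComp f in faces[0])
            frame.Draw(f.rect, new Bgr(Color.Red), 2);
        frame.Save("output.jpg");
    }
}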
Example 1: FaceDetection
public Image<Bgr, Byte> FaceDetection(Image Image)
{
    face = new HaarCascade("haarcascade_frontalface_default.xml");
    Utility UTl = new Utility();
    //Get the current frame from the capture device
    Image<Bgr, Byte> currentFrame = UTl.ImageToBgrByte(Image);
    //Convert it to grayscale
    gray = currentFrame.Convert<Gray, Byte>();
    //Face detector
    MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(face, 1.2, 10, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));
    //Action for the first face detected
    try
    {
        MCvAvgComp f = facesDetected[0][0];
        result = currentFrame.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
        //Draw the detected face on the frame in white
        currentFrame.Draw(f.rect, new Bgr(Color.White), 2);
    }
    catch (Exception ex)
    {
        MessageBox.Show("Camera Error: Empty frames arrived. " + ex.Message, "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
    }
    return currentFrame;
}
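Example 1 depends on a project-specific Utility.ImageToBgrByte helper that is not shown on this page. A plausible minimal implementation, assuming it simply wraps the System.Drawing.Image in an Emgu CV image via a Bitmap (the class and method names follow the example; the body is an assumption):

using System.Drawing;
using Emgu.CV;
using Emgu.CV.Structure;

public class Utility
{
    // Hypothetical helper: converts a System.Drawing.Image into an
    // Emgu CV Image<Bgr, byte> by round-tripping through a Bitmap.
    public Image<Bgr, byte> ImageToBgrByte(Image image)
    {
        using (Bitmap bitmap = new Bitmap(image))
        {
            // The Image<,> constructor copies the pixel data,
            // so disposing the Bitmap afterwards is safe.
            return new Image<Bgr, byte>(bitmap);
        }
    }
}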
Example 2: FrameGrabber
void FrameGrabber(object sender, EventArgs e)
{
    //Initial number of detected faces
    label3.Text = "0";
    //Get the current frame from the capture device
    currentFrame = grabber.QueryFrame().Resize(425, 322, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
    //Convert the frame to grayscale
    gray = currentFrame.Convert<Gray, Byte>();
    //Face detector
    MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
        face,
        1.2,
        10,
        Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
        new Size(20, 20));
    //Draw the ROI for each detected face
    foreach (MCvAvgComp f in facesDetected[0])
    {
        //Draw a colored ROI to mark the detected face
        currentFrame.Draw(f.rect, new Bgr(Color.OrangeRed), 2);
        //Show the current number of detected faces
        label3.Text = facesDetected[0].Length.ToString();
        //Restrict the ROI for eye detection to the face
        gray.ROI = f.rect;
        MCvAvgComp[][] eyesDetected = gray.DetectHaarCascade(
            eye,
            1.1,
            10,
            Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
            new Size(20, 20));
        gray.ROI = Rectangle.Empty;
        foreach (MCvAvgComp ey in eyesDetected[0])
        {
            Rectangle eyeRect = ey.rect;
            eyeRect.Offset(f.rect.X, f.rect.Y);
            currentFrame.Draw(eyeRect, new Bgr(Color.Green), 2);
        }
    }
    //Show the processed video frame
    imageBoxFrameGrabber.Image = currentFrame;
}
Example 3: Detect
public void Detect(byte[] pixels, int width, int height)
{
    // Build Image
    Bitmap bitmap = WSRKinectSensor.ToBitmap(pixels, width, height, PixelFormat.Format32bppRgb);
    Image<Bgr, Byte> color = new Image<Bgr, Byte>(bitmap);
    // Convert it to Grayscale
    Gray = color.Convert<Gray, Byte>();
    // Detect faces
    Faces = Gray.DetectHaarCascade(haarCascade, 1.2, 2, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(80, 80))[0];
    // Train if needed
    Train();
}
Example 4: DetectAndDrawEyes
private static void DetectAndDrawEyes(Image<Bgr, byte> image, Image<Gray, byte> gray, MCvAvgComp f, HaarCascade eye)
{
    gray.ROI = f.rect;
    MCvAvgComp[][] eyesDetected = gray.DetectHaarCascade(
        eye,
        1.1,
        10,
        Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
        new Size(20, 20));
    gray.ROI = Rectangle.Empty;
    foreach (MCvAvgComp e in eyesDetected[0])
    {
        Rectangle eyeRect = e.rect;
        eyeRect.Offset(f.rect.X, f.rect.Y);
        image.Draw(eyeRect, new Bgr(Color.Red), 2);
    }
}
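Examples 2 and 4 both use the same ROI pattern: set gray.ROI to the face rectangle so the eye cascade only scans the face, then offset the resulting eye rectangles back into full-frame coordinates. A short usage sketch of the helper above; the cascade file names and detection parameters are assumptions chosen to match the surrounding examples:

// Sketch: detect faces on a full frame, then locate eyes inside each face
// using the DetectAndDrawEyes helper from Example 4.
HaarCascade faceCascade = new HaarCascade("haarcascade_frontalface_default.xml");
HaarCascade eyeCascade = new HaarCascade("haarcascade_eye.xml");
Image<Bgr, byte> frame = new Image<Bgr, byte>("input.jpg");
Image<Gray, byte> gray = frame.Convert<Gray, byte>();
MCvAvgComp[][] faces = gray.DetectHaarCascade(
    faceCascade, 1.2, 10,
    Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));
foreach (MCvAvgComp f in faces[0])
    DetectAndDrawEyes(frame, gray, f, eyeCascade);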
Example 5: DoNormalDetection
// FaceDetection in the normal way
public override void DoNormalDetection(string imagePath)
{
    _image = new Image<Bgr, byte>(imagePath); //Read the file as an 8-bit Bgr image
    _egray = _image.Convert<Gray, Byte>(); //Convert it to Grayscale
    _gray = _egray.Copy(); //Copy image in Grayscale
    _egray._EqualizeHist(); //Equalize
    Image<Gray, Byte> tempgray = _egray.Copy();
    MCvAvgComp[][] facesDetected = _egray.DetectHaarCascade(_faces, 1.1, 1, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new System.Drawing.Size(20, 20));
    foreach (MCvAvgComp f in facesDetected[0])
    {
        if (f.neighbors > 100)
        {
            //_image.Draw(f.rect, new Bgr(System.Drawing.Color.Blue), 2); // face
            tempgray.ROI = f.rect; //Set the region of interest on the face
            MCvAvgComp[][] eyesDetected = tempgray.DetectHaarCascade(_eyes, 1.1, 1, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new System.Drawing.Size(20, 20));
            if (eyesDetected[0].Length != 0)
            {
                foreach (MCvAvgComp e in eyesDetected[0])
                {
                    if (e.neighbors > 100)
                    {
                        System.Drawing.Rectangle eyeRect = e.rect;
                        eyeRect.Offset(f.rect.X, f.rect.Y);
                        _image.Draw(eyeRect, new Bgr(System.Drawing.Color.Red), 2);
                    }
                }
            }
        }
    }
    this._processedImages = new IImage[3];
    this._processedImages[0] = _gray;
    this._processedImages[1] = _egray;
    this._processedImages[2] = _image;
}
Example 6: DoEyesRegionExtraction
private bool DoEyesRegionExtraction(Image<Gray, Byte> input, TrackData trackData)
{
    // We assume there's only one face in the video
    MCvAvgComp[][] facesDetected = input.DetectHaarCascade(
        haarCascade,
        Settings.Instance.Eyestracker.ScaleFactor,
        2, //Minimum neighbours; a higher value reduces false detections
        HAAR_DETECTION_TYPE.FIND_BIGGEST_OBJECT,
        Settings.Instance.Eyestracker.SizeMin);
    if (facesDetected[0].Length == 1)
    {
        MCvAvgComp face = facesDetected[0][0];
        if (face.rect.X != 0 && face.rect.Width != 0)
        {
            if (face.rect.Height < 100)
                return false;
            roiEyes = face.rect;
            // Add some margin
            //roiEyes.Y = Convert.ToInt32(roiEyes.Y * 0.90);
            roiEyes.X = Convert.ToInt32(roiEyes.X * 0.85);
            roiEyes.Height = Convert.ToInt32(roiEyes.Height * 1.2);
            roiEyes.Width = Convert.ToInt32(roiEyes.Width * 1.4);
            foundEyes = true;
            trackData.EyesROI = roiEyes;
        }
    }
    else
    {
        foundEyes = false;
        roiEyes = new Rectangle(new Point(0, 0), new Size(0, 0));
    }
    Performance.Now.Stamp("Eyes X:" + roiEyes.X + " Y:" + roiEyes.Y + " W:" + roiEyes.Width + " H:" + roiEyes.Height);
    return foundEyes;
}
Example 7: DetectFaces
public IList<RoI> DetectFaces(Image image, EnumDetectionType detectionType, double scale, int minNeighbors, Size minSize)
{
    IList<RoI> rois = new List<RoI>();
    // convert to the OpenCV enum format
    HAAR_DETECTION_TYPE detectionTypeOpenCV = EnumHelper.StringToEnum<HAAR_DETECTION_TYPE>(detectionType.ToString());
    try
    {
        Image<Gray, byte> gray = new Image<Gray, byte>(new Bitmap(image));
        //Face Detector
        MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
            face,
            scale,
            minNeighbors,
            detectionTypeOpenCV,
            minSize
        );
        // create a RoI object for every detected face
        foreach (MCvAvgComp f in facesDetected[0])
        {
            RoI roi = new RoI();
            roi.X = f.rect.X;
            roi.Y = f.rect.Y;
            roi.Width = f.rect.Width;
            roi.Height = f.rect.Height;
            rois.Add(roi);
        }
    }
    catch
    {
        throw new FaceDetectionException("Error while detecting faces!");
    }
    return rois;
}
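Example 7 relies on an EnumHelper.StringToEnum<T> utility that is not part of Emgu CV. A minimal sketch of such a helper, assuming it is a thin wrapper over Enum.Parse (the class and method names follow the example; the body is an assumption):

using System;

public static class EnumHelper
{
    // Hypothetical helper: maps an enum member name (as a string)
    // onto the corresponding value of enum type T.
    public static T StringToEnum<T>(string name) where T : struct
    {
        return (T)Enum.Parse(typeof(T), name, true);
    }
}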
Example 8: FillBoxByFace
public void FillBoxByFace(ImageBox image)
{
    //Get a gray frame from the capture device
    gray = grabber.QueryGrayFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
    //Face detector
    MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(face, 1.2, 10, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));
    //Keep only the first face detected
    foreach (MCvAvgComp f in facesDetected[0])
    {
        TrainedFace = currentFrame.Copy(f.rect).Convert<Gray, byte>();
        break;
    }
    //Resize the detected face so it matches the size of the test images,
    //using cubic interpolation
    TrainedFace = TrainedFace.Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
    //trainingImages.Add(TrainedFace);
    //labels.Add(textBox1.Text);
    //Show the added face in grayscale
    image.Image = TrainedFace;
}
Example 9: FrameGrabber
void FrameGrabber(object sender, EventArgs e)
{
    label3.Text = "0";
    //label4.Text = "";
    NamePersons.Add("");
    //Grab the query frame from the capture device
    DateTime StarTime = DateTime.Now;
    currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
    //Convert to grayscale
    gray = currentFrame.Convert<Gray, Byte>();
    //Face detection
    MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
        face,
        1.2,
        10,
        Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
        new Size(20, 20));
    //Action for each detected element
    foreach (MCvAvgComp f in facesDetected[0])
    {
        t = t + 1;
        result = currentFrame.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
        currentFrame.Draw(f.rect, new Bgr(Color.Red), 2);
        if (trainingImages.ToArray().Length != 0)
        {
            MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);
            EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                trainingImages.ToArray(),
                labels.ToArray(),
                3000,
                ref termCrit);
            name = recognizer.Recognize(result);
            currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));
        }
        NamePersons[t - 1] = name;
        NamePersons.Add("");
        //Number of faces detected
        label3.Text = facesDetected[0].Length.ToString();
    }
    t = 0;
    //Names of the persons detected
    for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
    {
        names = names + NamePersons[nnn] + ", ";
    }
    //Show the result in imageBoxFrameGrabber
    imageBoxFrameGrabber.Image = currentFrame;
    DateTime endTime = DateTime.Now;
    textBox2.Text = (endTime - StarTime).ToString();
    label4.Text = names;
    names = "";
    NamePersons.Clear();
}
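Examples 9, 11, 12, and 14 all pair DetectHaarCascade with Emgu's EigenObjectRecognizer but never show how its inputs are built. For context, a minimal sketch of how the trainingImages and labels collections those examples assume might be populated; the file names and labels are assumptions, and the 100x100 size is chosen to match the resize calls in the examples:

using System.Collections.Generic;
using Emgu.CV;
using Emgu.CV.Structure;

// Hypothetical training setup: one grayscale 100x100 face per person.
List<Image<Gray, byte>> trainingImages = new List<Image<Gray, byte>>();
List<string> labels = new List<string>();

trainingImages.Add(new Image<Gray, byte>("face1.bmp").Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC));
labels.Add("Alice");
trainingImages.Add(new Image<Gray, byte>("face2.bmp").Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC));
labels.Add("Bob");

// Recognizer construction mirrors the examples: the term-criteria
// iteration count tracks the number of trained images.
MCvTermCriteria termCrit = new MCvTermCriteria(trainingImages.Count, 0.001);
EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
    trainingImages.ToArray(), labels.ToArray(), 3000, ref termCrit);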
Example 10: FrameGrabber
void FrameGrabber(object sender, EventArgs e)
{
    try
    {
        // NamePersons.Add("");
        currentFrame = grabber.QueryFrame().Resize(640, 480, INTER.CV_INTER_CUBIC);
        //Convert it to grayscale
        gray = currentFrame.Convert<Gray, Byte>();
        //Face detector
        MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
            face,
            1.1,
            5,
            Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
            new Size(60, 60));
        SoNguoi = 0;
        //Action for each element detected
        foreach (MCvAvgComp f in facesDetected[0])
        {
            t = t + 1;
            resultface = currentFrame.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, INTER.CV_INTER_CUBIC);
            sf = f.rect.Width / 100.0;
            //Draw the detected face on the frame in green
            currentFrame.Draw(f.rect, new Bgr(Color.Green), 2);
            grayf = resultface.Resize(30, 30, INTER.CV_INTER_CUBIC);
            Bitmap tam = grayf.ToBitmap();
            //Bitmap tamnewsize = new Bitmap(tam, newsizegb);
            matrixtam = PCA.image_2_matrix(tam);
            matrixtam = Radon1.ApdungRadon(matrixtam); //PCA.apDungWaveletGabors(matrixtam, 0, 1.56, 1);

            #region detect eye, nose, mouth
            //Eye detection
            grayf = resultface;
            rte.X = 0; rte.Y = 15;
            rte.Width = 100;
            rte.Height = 40;
            graye = grayf.Copy(rte).Convert<Gray, byte>();
            MCvAvgComp[][] eyesDetected = graye.DetectHaarCascade(
                eye,
                1.02,
                5,
                Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                new Size(20, 20));
            int k = 0;
            foreach (MCvAvgComp es in eyesDetected[0])
            {
                rt.X = (int)(sf * es.rect.X) + f.rect.X;
                rt.Y = (int)(sf * (es.rect.Y + 15)) + f.rect.Y;
                rt.Width = (int)(26 * sf);
                rt.Height = (int)(26 * sf);
                currentFrame.Draw(rt, new Bgr(Color.Yellow), 2);
                rt.X = es.rect.X; rt.Y = es.rect.Y + 15;
                rt.Width = 23;
                rt.Height = 23;
                graytam = grayf.Copy(rt).Convert<Gray, byte>();
                if (rt.X > 50)
                {
                    this.ibe1.Image = graytam;
                    resulteyeR = graytam;
                }
                else
                {
                    this.ibe2.Image = graytam;
                    resulteyeL = graytam;
                }
                k++;
                if (k == 2) break;
            }
            //Crop the mouth region from the gray face
            rtm.X = 0; rtm.Y = 60;
            rtm.Width = 100;
            rtm.Height = 40;
            grayfm = grayf.Copy(rtm).Convert<Gray, byte>();
            //Mouth detector
            MCvAvgComp[][] mouthsDetected = grayfm.DetectHaarCascade(
                mouth,
                1.1,
                5,
                Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.FIND_BIGGEST_OBJECT,
//......... the rest of this method is omitted .........
Example 11: getFaceTag
internal string getFaceTag(Bitmap sourceBmp)
{
    //Get the current frame from the capture device
    currentFrame = new Image<Bgr, byte>(sourceBmp).Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR);
    if (currentFrame != null)
    {
        gray_frame = currentFrame.Convert<Gray, Byte>();
        //Face detector
        MCvAvgComp[][] facesDetected = gray_frame.DetectHaarCascade(
            Face,
            1.2,
            1,
            Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
            new System.Drawing.Size(20, 20));
        foreach (MCvAvgComp f in facesDetected[0])
        {
            t = t + 1;
            result = currentFrame.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
            //Draw the detected face (disabled)
            //currentFrame.Draw(f.rect, new Bgr(Color.Red), 2);
            if (trainingImages.ToArray().Length != 0)
            {
                //TermCriteria for face recognition; the number of trained images sets maxIteration
                MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);
                //Eigenface recognizer
                EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                    trainingImages.ToArray(),
                    labels.ToArray(),
                    3000,
                    ref termCrit);
                name = recognizer.Recognize(result);
                if (!string.IsNullOrEmpty(name))
                {
                    return name;
                }
            }
        }
    }
    return "Sanmeet";
}
Example 12: FrameGrabber
public void FrameGrabber(object sender, EventArgs e)
{
    _lastInfo = new List<HeadInformation>();
    CountOfFacesLabel.Text = "0";
    //label4.Text = "";
    NamePersons.Add("");
    //Get the current frame from the capture device
    currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
    //Convert it to grayscale
    gray = currentFrame.Convert<Gray, Byte>();
    //Face detector
    MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
        face,
        1.2,
        10,
        Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
        new Size(20, 20));
    //Action for each element detected
    foreach (MCvAvgComp f in facesDetected[0])
    {
        t = t + 1;
        result = currentFrame.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
        //Draw the detected face on the frame in red
        currentFrame.Draw(f.rect, new Bgr(Color.Red), 2);
        if (trainingImages.ToArray().Length != 0)
        {
            //TermCriteria for face recognition; the number of trained images sets maxIteration
            MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);
            //Eigenface recognizer
            EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                trainingImages.ToArray(),
                labels.ToArray(),
                3000,
                ref termCrit);
            name = recognizer.Recognize(result);
            //Draw the label for each face detected and recognized
            currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.Red));
        }
        NamePersons[t - 1] = name;
        NamePersons.Add("");
        //Set the number of faces detected on the scene
        CountOfFacesLabel.Text = facesDetected[0].Length.ToString();
        //Set the region of interest on the face
        gray.ROI = f.rect;
        MCvAvgComp[][] eyesDetected = gray.DetectHaarCascade(
            eye,
            1.9,
            5,
            Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
            new Size(20, 20));
        gray.ROI = Rectangle.Empty;
        foreach (MCvAvgComp ey in eyesDetected[0])
        {
            Rectangle eyeRect = ey.rect;
            eyeRect.Inflate(-7, -7);
            eyeRect.Offset(f.rect.X, f.rect.Y);
            currentFrame.Draw(eyeRect, new Bgr(Color.Blue), 2);
        }
        //gray.ROI = f.rect;
        //MCvAvgComp[][] mouthDetected = gray.DetectHaarCascade(
        //    mouth,
        //    1.1,
        //    37,
        //    Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
        //    new Size(20, 20));
        //gray.ROI = Rectangle.Empty;
        //foreach (MCvAvgComp ey in mouthDetected[0])
        //{
        //    Rectangle mouthRect = ey.rect;
        //    mouthRect.Offset(f.rect.X, f.rect.Y);
        //    currentFrame.Draw(mouthRect, new Bgr(Color.Black), 2);
        //}
        gray.ROI = f.rect;
        MCvAvgComp[][] smileDetected = gray.DetectHaarCascade(
            smile,
            2,
            20,
            Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
            new Size(20, 20));
        gray.ROI = Rectangle.Empty;
        HeadInformation hi = new HeadInformation();
//......... the rest of this method is omitted .........
Example 13: addface_Click
private void addface_Click(object sender, EventArgs e)
{
    try
    {
        if (textBox1.Text == "" || loptxt.Text == "" || mssvtxt.Text == "")
        {
            MessageBox.Show("Not enough information entered");
        }
        else
        {
            gray = grabber.QueryGrayFrame().Resize(640, 480, INTER.CV_INTER_CUBIC);
            //Face detector
            MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
                face,
                1.1,
                5,
                Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.FIND_BIGGEST_OBJECT,
                new Size(60, 60));
            //Action for each element detected
            foreach (MCvAvgComp f in facesDetected[0])
            {
                // resultface = currentFrame.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                TrainedFace = currentFrame.Copy(f.rect).Convert<Gray, byte>();
                break;
            }
            if (resultface == null) { timer3.Start(); return; }
            //Resize the detected face so it matches the size of the test images,
            //using cubic interpolation
            TrainedFace = resultface.Resize(100, 100, INTER.CV_INTER_CUBIC);
            //Add the name and face to the arrays
            //Show the added face in grayscale
            //if (dem == 0)
            imageBox1.Image = TrainedFace;
            try
            {
                //TrainedFace.Save(directory + "face" + matrix1s.Count + ".bmp");
                grabber.QueryFrame().Resize(640, 480, INTER.CV_INTER_CUBIC).Save(directory + textBox1.Text + matrix1s.Count + ".bmp");
            }
            catch (Exception ex)
            {
                for (int i = matrix1s.Count; i < dem; i++)
                {
                    // File.Delete(directory + "face" + (matrix1s.Count + dem) + ".bmp");
                    File.Delete(directory + textBox1.Text + (matrix1s.Count + dem) + ".bmp");
                }
            }
            //tface.Add(TrainedFace);
            TrainedFace = TrainedFace.Resize(50, 50, INTER.CV_INTER_CUBIC);
            Bitmap tam = TrainedFace.ToBitmap();
            Bitmap bmnewsize = new Bitmap(tam, newsizegb);
            x = PCA.image_2_matrix(bmnewsize);
            x = Radon1.ApdungRadon(x); //PCA.apDungWaveletGabors(x, 0, 1.56, 1);
            matrix1stam.Add(x);
            matrix1s.Add(x);
            labels.Add(mssvtxt.Text);
            if (dem != 9)
                addface.Text = "Add face " + (dem + 2).ToString();
            dem++;
            if (dem == 10)
            {
                luuanh();
                MessageBox.Show(textBox1.Text + "'s face detected and added :)", "Training OK", MessageBoxButtons.OK, MessageBoxIcon.Information);
                dem = 0; mauso = 1;
                // tface = null; tface = new List<Image<Gray, byte>>();
                matrix1stam = null; matrix1stam = new List<Matrix1>();
                x = null;
                imageBox1.Image = null;
                ibe1.Image = ibe2.Image = ibn.Image = ibm.Image = null;
                addface.Text = "Add face 1";
                resultface = resulteyeL = resulteyeR = resultmouth = resultnose = null;
                refreshdata();
            }
            HDfaces++;
            mauso++;
            if (HDfaces <= 10)
            {
                label9.Text = mauso.ToString();
                pictureBox1.Image = Image.FromFile(Application.StartupPath.ToString() + "/huongdan/" + HDfaces.ToString() + ".bmp");
                //MessageBox.Show(HDfaces.ToString());
            }
        }
    }
    catch (Exception ex)
    {
        dem = 0;
        MessageBox.Show(ex.ToString(), "Training Fail", MessageBoxButtons.OK, MessageBoxIcon.Exclamation);
    }
}
Example 14: FrameGrabber2
public void FrameGrabber2(object sender, EventArgs e)
{
    NamePersons.Add("");
    face = new HaarCascade("haarcascade_frontalface_default.xml");
    //Utility UTl = new Utility();
    //Get the current frame from the capture device
    //Image<Bgr, Byte> currentFrame = UTl.ImageToBgrByte(Image);
    try
    {
        currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
    }
    catch (Exception exp)
    {
        grabber = new Capture("video002.mp4");
    }
    //Convert it to grayscale
    gray = currentFrame.Convert<Gray, Byte>();
    //Face detector
    MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(face, 1.2, 10, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));
    //Action for the first element detected
    try
    {
        MCvAvgComp f = facesDetected[0][0];
        result = currentFrame.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
        //Draw the detected face on the frame in white
        currentFrame.Draw(f.rect, new Bgr(Color.White), 2);
    }
    catch (Exception ex)
    {
        //MessageBox.Show("Camera Error: Empty frames arrived" + ex.Message.ToString(), "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
    }
    if (trainingImages.ToArray().Length != 0)
    {
        //TermCriteria for face recognition; the number of trained images sets maxIteration
        MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);
        //Eigenface recognizer
        EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
            trainingImages.ToArray(),
            labels.ToArray(),
            3000,
            ref termCrit);
        name = recognizer.Recognize(result);
        //Draw the label for each face detected and recognized
        //currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));
    }
    //NamePersons[t - 1] = name;
    NamePersons.Add("");
    t = 0;
    //Concatenate the names of recognized persons
    //for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
    //{
    //    names = names + NamePersons[nnn] + ", ";
    //}
    //Show the faces processed and recognized
    emguImgFace.Image = currentFrame;
    lblCandidateID.Text = name;
    name = "";
    //Clear the list (vector) of names
    NamePersons.Clear();
}
Example 15: FrameGrabber
public void FrameGrabber(object sender, EventArgs e)
{
    lbl3 = "0";
    lbl4 = "";
    NamePersons.Add("");
    //Get the current frame from the capture device
    try
    {
        currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
    }
    catch { }
    //Convert it to grayscale
    gray = currentFrame.Convert<Gray, Byte>();
    //Face detector
    MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
        face,
        1.2,
        10,
        Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
        new Size(20, 20));
    //Action for each element detected
    foreach (MCvAvgComp f in facesDetected[0])
    {
        t = t + 1;
        result = currentFrame.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
        //Draw the detected face on the frame in red
        currentFrame.Draw(f.rect, new Bgr(Color.Red), 2);
        if (trainingImages.ToArray().Length != 0)
        {
            //UpdateRecognizer();
            name = recognizer.Recognize(new Image<Gray, byte>(ImageProcessing.ImagePreProcessing(result.ToBitmap())));
            //Draw the label for each face detected and recognized
            currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));
        }
        NamePersons[t - 1] = name;
        NamePersons.Add("");
        //Set the number of faces detected on the scene
        lbl3 = facesDetected[0].Length.ToString();
    }
    t = 0;
    //Concatenate the names of recognized persons
    for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
    {
        names = names + NamePersons[nnn] + ", ";
    }
    //Show the faces processed and recognized
    pictureBoxFrameGrabber.Image = currentFrame.ToBitmap();
    lbl3 = names;
    names = "";
    //Clear the list (vector) of names
    NamePersons.Clear();
}
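Example 15 calls a project-specific ImageProcessing.ImagePreProcessing helper before recognition that is not shown on this page. A plausible minimal sketch, assuming it normalizes lighting via histogram equalization and returns a Bitmap (the class and method names follow the example; the behavior is an assumption):

using System.Drawing;
using Emgu.CV;
using Emgu.CV.Structure;

public static class ImageProcessing
{
    // Hypothetical pre-processing: equalize the histogram of a grayscale
    // face crop to reduce lighting variation before recognition.
    public static Bitmap ImagePreProcessing(Bitmap input)
    {
        Image<Gray, byte> gray = new Image<Gray, byte>(input);
        gray._EqualizeHist();
        return gray.ToBitmap();
    }
}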