This article collects typical usage examples of the Face.CalculateFovAndCoords method in C#. If you are wondering exactly how Face.CalculateFovAndCoords is used or what it is for, the example selected here may help. You can also look further into the Face class that declares the method.
One code example of Face.CalculateFovAndCoords is shown below.
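Before the full example, here is a minimal sketch of the calling convention, distilled from the example that follows. It assumes the project's Face type wrapping a VeriLook VleFace detection, features and detection details already obtained from an extractor, and an NImage source frame; it illustrates the call pattern rather than a standalone program.

// Minimal calling sketch (assumes vlFace, features, detectionDetails,
// croppedBitmap and image exist as in the example below)
Face face = new Face(vlFace);
face.SetRecognitionData(features, detectionDetails, croppedBitmap);
// Compute the face's field of view and coordinates relative to the full frame
face.CalculateFovAndCoords((int)image.Width, (int)image.Height);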
Example 1: RecognizeMultipleFaces
/// <summary>
/// Recognizes multiple faces in a single image frame
/// </summary>
/// <param name="image">Neurotec image on which the face recognition is based</param>
/// <param name="vlFaces">Array of detected faces</param>
/// <param name="detectedFaces">When this method returns, contains the Face objects built for each detected face</param>
/// <param name="MultipleRecognitionResults">When this method returns, contains all recognition results for each recognized face</param>
/// <returns>An array containing the best match among all known faces for each recognized face.</returns>
private RecognitionResult[] RecognizeMultipleFaces(NImage image, VleFace[] vlFaces, out FaceCollection detectedFaces, out RecognitionResult[][] MultipleRecognitionResults)
{
    #region Variables
    // Stores the original image as a bitmap
    Bitmap bmp;
    // Bitmap to draw the detected face region into
    Bitmap croppedBitmap;
    // Graphics used to copy the detected face region
    Graphics g;
    // Rectangle used to copy the scaled region of the detected face
    Rectangle rect;
    // Neurotec grayscale image required to recognize the detected face region
    NGrayscaleImage gray;
    // VeriLook detection details produced by face recognition
    VleDetectionDetails detectionDetails;
    // The face templates resulting from face recognition
    byte[][] templates = new byte[vlFaces.Length][];
    // The face features resulting from face recognition
    byte[] features;
    // Stores the face currently being recognized
    Face currentFace;
    // Stores the detected faces
    //FaceCollection recognizedFaces = new FaceCollection(vlFaces.Length);
    detectedFaces = new FaceCollection(vlFaces.Length);
    // Stores the best recognition result for the current face
    RecognitionResult currentResult;
    // Stores the recognition results for the current face
    RecognitionResult[] currentRecognitionResults;
    // Stores all recognition results
    List<RecognitionResult[]> recognitionResults = new List<RecognitionResult[]>();
    // Stores the best recognition result matches
    List<RecognitionResult> selectedResults = new List<RecognitionResult>();
    #endregion
    // Get the original image as a bitmap
    bmp = new Bitmap(image.ToBitmap());
    // Extract each face and get its template
    foreach (VleFace vlFace in vlFaces)
    {
        // Get a rectangle somewhat larger than the one in which the face was detected,
        // because sometimes the face cannot be recognized again in the exact detection area.
        // Note that the expanded rectangle may extend past the image bounds.
        //rect = new Rectangle(vlFace.Rectangle.X - 50, vlFace.Rectangle.Y - 50, vlFace.Rectangle.Width + 100, vlFace.Rectangle.Height + 100);
        rect = new Rectangle(vlFace.Rectangle.X - vlFace.Rectangle.Width / 2, vlFace.Rectangle.Y - vlFace.Rectangle.Height / 2, vlFace.Rectangle.Width * 2, vlFace.Rectangle.Height * 2);
        // Copy the face region into its own bitmap
        croppedBitmap = new Bitmap(rect.Width, rect.Height);
        g = Graphics.FromImage(croppedBitmap);
        g.DrawImage(bmp, 0, 0, rect, GraphicsUnit.Pixel);
        // Get a grayscale image for face detection
        gray = (NGrayscaleImage)NImage.FromImage(NPixelFormat.Grayscale, 0, NImage.FromBitmap(croppedBitmap));
        // Extract the face and its template
        currentFace = new Face(vlFace);
        features = vlExtractor.Extract(gray, out detectionDetails);
        // Skip this region if no face could be detected in the cropped image
        if (!detectionDetails.FaceAvailable) continue;
        // Attach the extracted features and the cropped bitmap to the face
        UseResources();
        currentFace.SetRecognitionData(features, detectionDetails, croppedBitmap);
        ReleaseResources();
        // Compute the face's field of view and coordinates relative to the full frame
        currentFace.CalculateFovAndCoords((int)image.Width, (int)image.Height);
        detectedFaces.Add(currentFace);
        Console("Found face: location = (" + detectionDetails.Face.Rectangle.X + ", " + detectionDetails.Face.Rectangle.Y + "), width = " + detectionDetails.Face.Rectangle.Width + ", height = " + detectionDetails.Face.Rectangle.Height + ", confidence = " + detectionDetails.Face.Confidence);
        try
        {
            // Release the per-face resources; failures here are not fatal
            croppedBitmap.Dispose();
            g.Dispose();
            gray.Dispose();
        }
        catch { }
    }
    if (detectedFaces.Count > 0) Console(detectedFaces.Count.ToString() + " faces found.");
    if (knownFaces.Count > 0)
    {
        Console("Initializing recognition");
        // Recognize each detected face
        for (int i = 0; i < detectedFaces.Count; ++i)
        {
            if (detectedFaces[i].Features == null) continue;
            currentFace = detectedFaces[i];
            // Start recognition
            currentResult = Recognize(currentFace, out currentRecognitionResults);
            if (currentResult == null) continue;
            selectedResults.Add(currentResult);
            recognitionResults.Add(currentRecognitionResults);
        }
    }
    MultipleRecognitionResults = recognitionResults.ToArray();
    return selectedResults.ToArray();
}
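The example calls CalculateFovAndCoords with the full frame's width and height but never shows its body. Judging from the name and arguments, it most likely converts the detected face rectangle's pixel position into angular coordinates within the camera's field of view. The sketch below is only a guess at that kind of computation, not the project's implementation: the FovMath class, the FaceAngles method, and the CameraHFovDeg/CameraVFovDeg constants are all hypothetical.

using System.Drawing;

static class FovMath
{
    // Hypothetical camera field of view in degrees (not from the original source)
    const double CameraHFovDeg = 60.0;
    const double CameraVFovDeg = 45.0;

    // Maps a face rectangle inside a frame to angular offsets from the optical axis.
    // Positive horizontal angle: face is right of center; positive vertical: above it.
    public static (double hAngleDeg, double vAngleDeg) FaceAngles(Rectangle face, int frameWidth, int frameHeight)
    {
        // Face center in pixels
        double cx = face.X + face.Width / 2.0;
        double cy = face.Y + face.Height / 2.0;
        // Normalized offsets from the frame center, in [-0.5, 0.5]
        double nx = cx / frameWidth - 0.5;
        double ny = 0.5 - cy / frameHeight; // image y grows downward
        // Linear pixel-to-angle approximation: offset times total FOV
        return (nx * CameraHFovDeg, ny * CameraVFovDeg);
    }
}

A linear mapping like this ignores lens projection, so it is only approximate near the frame edges, but it matches the common shape of FOV-and-coordinates helpers in camera-based recognition code.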