This page collects typical usage examples of the C# method Image._EqualizeHist. If you have been wondering what Image._EqualizeHist does, how to call it, or what real-world usage looks like, the curated samples here should help. You can also explore further usage examples of the containing class, Image.
The 15 code examples of Image._EqualizeHist shown below are sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better C# samples.
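Before the numbered samples, a minimal standalone sketch may help orient you (a hedged example: it assumes Emgu CV 2.x, and the file paths are placeholders not taken from any sample below). _EqualizeHist equalizes the image's intensity histogram in place to boost contrast, which is why most samples call it on a grayscale face crop right before detection or recognition:

using System;
using Emgu.CV;            // Image<TColor, TDepth>
using Emgu.CV.Structure;  // Gray

class EqualizeHistDemo
{
    static void Main()
    {
        // Load as an 8-bit grayscale image ("input.jpg" is a placeholder path).
        Image<Gray, Byte> img = new Image<Gray, Byte>("input.jpg");
        // Underscore-prefixed Emgu methods modify the image in place.
        img._EqualizeHist();
        img.Save("equalized.jpg");
    }
}

Because the call mutates its target, equalize a Copy() when the original pixels are still needed, as several of the samples below do.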
Example 1: Detection
private void Detection(object r, EventArgs e)
{
//grab one frame and shrink it for faster detection
currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
DetectFace.Detect(currentFrame, "haarcascade_frontalface_default.xml", faces, out detectionTime);
foreach (Rectangle face in faces)
{
//result = currentFrame.Copy(face.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
currentFrame.Draw(face, new Bgr(Color.Red), 2);
//Get copy of img and show it
result = currentFrame.Copy(face).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC); //making small copy of face
result._EqualizeHist();
if (Eigen_Recog.IsTrained)
{
string name = Eigen_Recog.Recognise(result);
//Draw the label for each face detected and recognized
currentFrame.Draw(name, ref font, new Point(face.X - 2, face.Y - 2), new Bgr(Color.LightGreen));
}
}
//display the image
ImageViewer.Image = currentFrame;
labelTimeSpend.Text = detectionTime.ToString() + " msec";
faces.Clear();
currentFrame.Dispose();
}
Example 2: JanelaDetectarFace
public JanelaDetectarFace(Mat pImagem)
{
InitializeComponent();
mImagem = pImagem;
// currentFrame = new Image<Bgr, byte>(new Size(320, 240));
// CvInvoke.Resize(mImagem, currentFrame, new Size(320, 240), 0, 0, Emgu.CV.CvEnum.Inter.Cubic);
// imagemDetect.Image = currentFrame.ToBitmap();
if (Eigen_Recog.IsTrained)
{
// message_bar.Text = "Training Data loaded";
}
else
{
//message_bar.Text = "No training data found, please train program using Train menu option";
}
currentFrame = new Image<Bgr, byte>(new Size(820, 780));
CvInvoke.Resize(mImagem, currentFrame, new Size(820, 780), 0, 0, Emgu.CV.CvEnum.Inter.Cubic);
//Convert it to Grayscale
if (currentFrame != null)
{
gray_frame = currentFrame.Convert<Gray, Byte>();
//Face Detector
Rectangle[] facesDetected = Face.DetectMultiScale(gray_frame, 1.2, 10, new Size(50, 50), Size.Empty);
//Action for each element detected
for (int i = 0; i < facesDetected.Length; i++)// (Rectangle face_found in facesDetected)
{
//This focuses in on the face from the Haar results; it's not perfect, but it removes a majority
//of the background noise
facesDetected[i].X += (int)(facesDetected[i].Height * 0.15);
facesDetected[i].Y += (int)(facesDetected[i].Width * 0.22);
facesDetected[i].Height -= (int)(facesDetected[i].Height * 0.3);
facesDetected[i].Width -= (int)(facesDetected[i].Width * 0.35);
result = currentFrame.Copy(facesDetected[i]).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.Inter.Cubic);
result._EqualizeHist();
//draw the face detected in the 0th (gray) channel with blue color
currentFrame.Draw(facesDetected[i], new Bgr(Color.Red), 2);
if (Eigen_Recog.IsTrained)
{
string name = Eigen_Recog.Recognise(result);
int match_value = (int)Eigen_Recog.Get_Eigen_Distance;
//Draw the label for each face detected and recognized
currentFrame.Draw(name + "", new Point(facesDetected[i].X - 2, facesDetected[i].Y - 2), Emgu.CV.CvEnum.FontFace.HersheyDuplex, 1, new Bgr(Color.LightGreen));
// currentFrame.Draw(name + " ", ref font, new Point(facesDetected[i].X - 2, facesDetected[i].Y - 2), new Bgr(Color.LightGreen));
// ADD_Face_Found(result, name, match_value);
}
}
//Show the faces processed and recognized
imagemDetect.Image = currentFrame.ToBitmap();
}
}
Example 3: Detect
public void Detect(Image<Gray, Byte> sourceImage, List<Rectangle> objects)
{
//normalize image
sourceImage._EqualizeHist();
//detect objects
Rectangle[] detectedObjects = Classifier.DetectMultiScale(sourceImage, 1.1, 10, new Size(40, 40), Size.Empty);
//add detected face(s) to the list
objects.AddRange(detectedObjects);
}
Example 4: Form1
public Form1()
{
InitializeComponent();
recognizer = new LBPHFaceRecognizer(1, 8, 8, 9, 65);
classifier = new CascadeClassifier(haarcascade);
GPU_classifier = new GpuCascadeClassifier(haarcascade_cuda);
font = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_TRIPLEX, 0.5, 0.5);
if (File.Exists(@"traningdata.xml"))
{
recognizer.Load(@"traningdata.xml");
}
else
{
foreach (var file in Directory.GetFiles(Application.StartupPath + @"\Traning Faces\"))
{
try { temp = new Image<Gray, Byte>(file); }
catch { continue; }
temp._EqualizeHist();
var detectedFaces = classifier.DetectMultiScale(temp, 1.1, 15, new Size(24, 24), Size.Empty);
if (detectedFaces.Length == 0)
{
continue;
}
temp.ROI = detectedFaces[0];
temp = temp.Copy();
temp = temp.Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
imagesList.Add(temp);
imagesLabels.Add(Path.GetFileNameWithoutExtension(file));
}
for (int i = 0; i < imagesList.Count; i++)
{
imagesLabels_indices.Add(i);
}
try { recognizer.Train(imagesList.ToArray(), imagesLabels_indices.ToArray()); }
catch (Exception ex)
{
MessageBox.Show(ex.Message);
Environment.Exit(0);
}
}
}
Example 5: ObjectTracking
public ObjectTracking(Image<Bgr, Byte> image, Rectangle ROI)
{
// Initialize parameters
trackbox = new MCvBox2D();
trackcomp = new MCvConnectedComp();
hue = new Image<Gray, byte>(image.Width, image.Height);
hue._EqualizeHist();
mask = new Image<Gray, byte>(image.Width, image.Height);
hist = new DenseHistogram(30, new RangeF(0, 180));
backproject = new Image<Gray, byte>(image.Width, image.Height);
// Assign Object's ROI from source image.
trackingWindow = ROI;
// Producing Object's hist
CalObjectHist(image);
}
Example 6: Detect
public static void Detect(Image<Gray, byte> face)
{
//normalize
face._EqualizeHist();
//need a pair of eyes; if no pair is present, both the left and right eyes are considered not detected
Rectangle eyePairPos = DetectPairEyes(face);
if (!eyePairPos.Equals(Rectangle.Empty))
{
//detecting each eyes
Image<Gray, byte> eyePairImage = face.Copy(eyePairPos);
BlinkStateManager.leftEyeDetected = DetectLeftEye(eyePairImage);
BlinkStateManager.rightEyeDetected = DetectRighEye(eyePairImage);
}
else
{
BlinkStateManager.leftEyeDetected = false;
BlinkStateManager.rightEyeDetected = false;
}
}
Example 7: fnFindFacesThread
void fnFindFacesThread()
{
while (IsRunning)
{
if (FSource.FrameChanged)
lock(this)
{
FGrayImage = FSource.Img.Convert<Gray, Byte>();
FGrayImage._EqualizeHist();
MCvAvgComp[] faceDetected = FHaarCascade.Detect(FGrayImage, 1.8, 4, HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(FGrayImage.Width / 8, FGrayImage.Height / 8));
Faces.Clear();
foreach (MCvAvgComp f in faceDetected)
{
FaceTrackingFace face = new FaceTrackingFace();
var faceVector = new Vector2D(f.rect.X + f.rect.Width / 2, f.rect.Y + f.rect.Height / 2);
Vector2D CMaximumSourceXY = new Vector2D(FGrayImage.Width, FGrayImage.Height);
face.Position = VMath.Map(faceVector, CMinimumSourceXY, CMaximumSourceXY, CMinimumDestXY, CMaximumDestXY, TMapMode.Float);
face.Scale = VMath.Map(new Vector2D(f.rect.Width, f.rect.Height), CMinimumSourceXY.x, CMaximumSourceXY.x, 0, 2, TMapMode.Float);
Faces.Add(face);
}
}
}
}
Example 8: DoNormalDetection
// Face detection in the normal way
public override void DoNormalDetection(string imagePath)
{
_image = new Image<Bgr, byte>(imagePath); //Read the files as an 8-bit Bgr image
_egray = _image.Convert<Gray, Byte>(); //Convert it to Grayscale
_gray = _egray.Copy(); // Copy image in Grayscale
_egray._EqualizeHist(); // Equalize
Image<Gray, Byte> tempgray = _egray.Copy();
MCvAvgComp[][] facesDetected = _egray.DetectHaarCascade(_faces, 1.1, 1, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new System.Drawing.Size(20, 20));
foreach (MCvAvgComp f in facesDetected[0])
{
if (f.neighbors > 100)
{
//_image.Draw(f.rect, new Bgr(System.Drawing.Color.Blue), 2); // face
tempgray.ROI = f.rect; //Set the region of interest on the faces
MCvAvgComp[][] eyesDetected = tempgray.DetectHaarCascade(_eyes, 1.1, 1, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new System.Drawing.Size(20, 20));
if (eyesDetected[0].Length != 0)
{
foreach (MCvAvgComp e in eyesDetected[0])
{
if (e.neighbors > 100)
{
System.Drawing.Rectangle eyeRect = e.rect;
eyeRect.Offset(f.rect.X, f.rect.Y);
_image.Draw(eyeRect, new Bgr(System.Drawing.Color.Red), 2);
}
}
}
}
}
this._processedImages = new IImage[3];
this._processedImages[0] = _gray;
this._processedImages[1] = _egray;
this._processedImages[2] = _image;
}
Example 9: Detect
public static void Detect(Image<Gray, Byte> sourceImage, List<Rectangle> faces)
{
//normalize image
sourceImage._EqualizeHist();
//detect faces
Rectangle[] detectedFaces = faceClassifier.DetectMultiScale(sourceImage, 1.1, 10, new Size(100, 100), Size.Empty);
int heightInflate = 5, yOffset = -20;
for (int i = 0; i < detectedFaces.Length; i++)
{
if (detectedFaces[i].Top - (detectedFaces[i].Height / heightInflate) + (detectedFaces[i].Height / yOffset) > 0
&& detectedFaces[i].Bottom + (detectedFaces[i].Width / heightInflate) - (detectedFaces[i].Height / yOffset) < sourceImage.Height)
{
Rectangle temp = detectedFaces[i];
temp.Offset(0, detectedFaces[i].Height / yOffset);
temp.Inflate(0, detectedFaces[i].Height / heightInflate);
faces.Add(temp);
}
}
}
Example 10: Segm_Process
void Segm_Process()
{
//convert the image to grayscale
imgProcessed = imgOriginal.Convert<Gray, Byte>();
//auto-contrast
if (equalizeHist)
imgProcessed._EqualizeHist();
//noise filter
Image<Gray, byte> smoothedGrayFrame = imgProcessed.PyrDown();
smoothedGrayFrame = smoothedGrayFrame.PyrUp();
Image<Gray, byte> cannyFrame = null;
//edge detection, used when the noise filter is enabled
if (noiseFilter)
cannyFrame = smoothedGrayFrame.Canny(nfVal, nfVal);
//blur
if (blur)
imgProcessed = smoothedGrayFrame;
//adaptive threshold
CvInvoke.cvAdaptiveThreshold(imgProcessed, imgProcessed, 255, Emgu.CV.CvEnum.ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_MEAN_C, Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY, 4 + 4 % 2 + 1, thresVal);
//invert: white becomes black
imgProcessed._Not();
try
{
if (cannyFrame != null)
imgProcessed._Or(cannyFrame);
}
catch { }
if (cannyFrame != null)
cannyFrame = cannyFrame.Dilate(3);
//find contours
var sourceContours = imgProcessed.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST);
//filter the contours
contours = FilterContours(sourceContours, cannyFrame, imgProcessed.Width, imgProcessed.Height);
ibOriginal.Image = imgProcessed;
}
Example 11: piecesCheck
private Image<Hls, Byte> piecesCheck(Image<Bgr, Byte> img)
{
Image<Hls, Byte> result = new Image<Hls, byte>(img.Bitmap).PyrDown().PyrUp();
if (gaussian == true)
result = result.SmoothGaussian(gaussianValue);
if (contrast == true)
result._EqualizeHist();
//result[2] += saturation;
int countBlack;
int countWhite;
for (int i = 0; i < 32; i++)
{
int x = (int)boxList[i].center.X;
int y = (int)boxList[i].center.Y;
countWhite = 0;
countBlack = 0;
byte lightness = result.Data[y, x, 1]; //HLS channel 1 is lightness
if (lightness > whiteLightness)
{
//countWhite++;
result.Draw(new CircleF(boxList[i].center, 3), new Hls(120, 50, 100), 3);
}
if (lightness < blackLightness)
{
//countBlack++;
result.Draw(new CircleF(boxList[i].center, 3), new Hls(220, 60, 100), 3);
}
}
return result;
}
Example 12: piecesCheck2
private Image<Hls, Byte> piecesCheck2(Image<Bgr, Byte> img)
{
Image<Hls, Byte> result = new Image<Hls, byte>(img.Bitmap).PyrDown().PyrUp();
Game a = new Game(leftRadio.IsChecked.Value);
if (gaussian == true)
result = result.SmoothGaussian(gaussianValue);
if (contrast == true)
result._EqualizeHist();
//result[2] += saturation;
int countBlack;
int countWhite;
List<int> gameState = new List<int>();
for (int i = 0; i < 32; i++)
{
gameState.Add(2);
}
for (int i = 0; i < 32; i++)
{
int x = (int)boxList[i].center.X;
int y = (int)boxList[i].center.Y;
countWhite = 0;
countBlack = 0;
byte lightness = result.Data[y, x, 1]; //HLS channel 1 is lightness
if (lightness > whiteLightness)
{
countWhite++;
gameState[i] = 0;
result.Draw(new CircleF(boxList[i].center, 3), new Hls(120, 50, 100), 3);
}
if (lightness < blackLightness)
{
countBlack++;
gameState[i] = 1;
result.Draw(new CircleF(boxList[i].center, 3), new Hls(220, 60, 100), 3);
}
}
previousGame = a;
a.updateStatus(gameState);
currentGame = a;
return result;
}
Example 13: SaveTrainingData
public static bool SaveTrainingData(Image<Gray, byte> image, String name)
{
image = image.Resize(200, 200, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC, false);//resize
image._EqualizeHist();
String uniqueNumber = DateTime.Now.ToString("yyMMddHHmmss");
String filename = "face_" + name + "_" + uniqueNumber + ".jpg";
if (!Directory.Exists(Application.StartupPath + "\\TrainedFaces\\"))
Directory.CreateDirectory(Application.StartupPath + "\\TrainedFaces\\");
image.ToBitmap().Save(Application.StartupPath + "\\TrainedFaces\\" + filename, ImageFormat.Jpeg);
XmlDocument xmlFile = new XmlDocument();
if (File.Exists(Application.StartupPath + "\\TrainedFaces\\TrainedLabels.xml"))
{
bool loading = true;
while (loading)
{
try
{
xmlFile.Load(Application.StartupPath + "\\TrainedFaces\\TrainedLabels.xml");
loading = false;
}
catch
{
xmlFile = new XmlDocument();
Thread.Sleep(10);
}
}
XmlElement root = xmlFile.DocumentElement;
XmlElement face_D = xmlFile.CreateElement("FACE");
XmlElement name_D = xmlFile.CreateElement("NAME");
XmlElement file_D = xmlFile.CreateElement("FILE");
name_D.InnerText = name;
file_D.InnerText = filename;
face_D.AppendChild(name_D);
face_D.AppendChild(file_D);
root.AppendChild(face_D);
xmlFile.Save(Application.StartupPath + "\\TrainedFaces\\TrainedLabels.xml");
}
else
{
FileStream fileStream = File.OpenWrite(Application.StartupPath + "\\TrainedFaces\\TrainedLabels.xml");
using (XmlWriter xmlWriter = XmlWriter.Create(fileStream))
{
xmlWriter.WriteStartDocument();
xmlWriter.WriteStartElement("Faces_For_Training");
xmlWriter.WriteStartElement("FACE");
xmlWriter.WriteElementString("NAME", name);
xmlWriter.WriteElementString("FILE", filename);
xmlWriter.WriteEndElement();
xmlWriter.WriteEndElement();
xmlWriter.WriteEndDocument();
}
fileStream.Dispose();
}
return true;
}
Example 14: TrainFrame
private bool TrainFrame(int newid)
{
try
{
Image<Gray, byte> darkimage = new Image<Gray, byte>(ROIwidth, ROIheight);
Image<Gray, byte> cropimage = new Image<Gray, byte>(ROIwidth, ROIheight);
//ArrayList pic = new ArrayList();
if (loadImage != null)
{
var faces = face.Detect(loadImage, 1.3, 6, HAAR_DETECTION_TYPE.FIND_BIGGEST_OBJECT, new Size(120, 120), new Size(200, 200));
if (faces.Length > 0)
{
foreach (var facecount in faces)
{
facePosition = new Point(facecount.rect.X, facecount.rect.Y);
var eyeObjects = eyeWithGlass.DetectMultiScale(loadImage, 1.3, 6, minEye, maxEye);
if (eyeObjects.Length == 2)
{
Console.WriteLine("eye");
if (eyeObjects[0].X > eyeObjects[1].X)
{
var temp = eyeObjects[0];
eyeObjects[0] = eyeObjects[1];
eyeObjects[1] = temp;
}
int betweeneLength = eyeObjects[1].X - eyeObjects[0].X;
int lefteyebrowpoint = eyeObjects[0].X;
int righteyebrowpoint = eyeObjects[0].X + betweeneLength + eyeObjects[1].Width;
int xxx = (int)((1.5 / 8.0) * betweeneLength);
int neareyebrowpoint = (int)(0.2 * betweeneLength);
int faceheight = (int)(2.3 * betweeneLength);
loadImage.ROI = new Rectangle(new Point(lefteyebrowpoint - xxx, eyeObjects[0].Y - neareyebrowpoint), new Size((righteyebrowpoint + xxx) - (lefteyebrowpoint - xxx), faceheight));
cropimage = loadImage.Copy().Resize(ROIwidth, ROIheight, INTER.CV_INTER_LINEAR);
loadImage.ROI = Rectangle.Empty;
loadImage.Draw(new Rectangle(new Point(lefteyebrowpoint - xxx, eyeObjects[0].Y - neareyebrowpoint), new Size((righteyebrowpoint + xxx) - (lefteyebrowpoint - xxx), faceheight)), new Gray(0), 2);
if (!cropimage.Equals(darkimage))
{
cropimage._EqualizeHist();
imageBox7.Image = cropimage;
cropimage.Save(folderPath + tempPath);
string dbPath = (folderPath + tempPath).Replace("\\","/");
//mydb.InsertImageTraining(newid, dbPath, true);
//File.Delete(tempPath);
eigenRecog.reloadData();
imageBox1.Image = loadImage;
imageBox7.Image = cropimage;
return true;
//Fish_Recog.reloadData();
}
else
{
imageBox1.Image = loadImage;
imageBox7.Image = cropimage;
return false;
}
}
else
{
return false;
}
}
}
else
{
return false;
}
}
else
{
return false;
}
}
catch
{
return false;
}
return false;
}
Example 15: timer1_Tick
private void timer1_Tick(object sender, EventArgs e)
{
using (Image<Bgr, Byte> nextframe = cap.QueryFrame())
{
if (nextframe != null)
{
if (isTrack == false)
{
Image<Gray, Byte> grayframe = nextframe.Convert<Gray, Byte>();
grayframe._EqualizeHist();
var faces = grayframe.DetectHaarCascade(haar, 1.4, 4, HAAR_DETECTION_TYPE.FIND_BIGGEST_OBJECT | HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(40, 40))[0];
hsv = nextframe.Convert<Hsv, Byte>();
hsv._EqualizeHist();
hue = new Image<Gray, Byte>(grayframe.Width, grayframe.Height);
mask = new Image<Gray, Byte>(grayframe.Width, grayframe.Height);
backproject = new Image<Gray, Byte>(grayframe.Width, grayframe.Height);
Emgu.CV.CvInvoke.cvInRangeS(hsv, new MCvScalar(0, 30, Math.Min(10, 255), 0), new MCvScalar(180, 256, Math.Max(10, 255), 0), mask);
Emgu.CV.CvInvoke.cvSplit(hsv, hue, IntPtr.Zero, IntPtr.Zero, IntPtr.Zero);
picHue.Image = hue.ToBitmap();
foreach (var face in faces)
{
// Rectangle roi = new Rectangle(face.rect.X + face.rect.Width / 4, face.rect.Y + face.rect.Height / 4, face.rect.Width / 2, face.rect.Height / 2);
// Rectangle roi = new Rectangle(face.rect.X, face.rect.Y, face.rect.Width / 2, face.rect.Height / 2);
Emgu.CV.CvInvoke.cvSetImageROI(hue, face.rect);
Emgu.CV.CvInvoke.cvSetImageROI(mask, face.rect);
nextframe.Draw(face.rect, new Bgr(0, double.MaxValue, 1), 2);
picMask.Image = mask.ToBitmap();
trackwin = face.rect;
}
img = new IntPtr[1]
{
hue
};
Emgu.CV.CvInvoke.cvCalcHist(img, hist, false, mask);
Emgu.CV.CvInvoke.cvResetImageROI(hue);
Emgu.CV.CvInvoke.cvResetImageROI(mask);
CapImg.Image = nextframe.ToBitmap();
isTrack = true;
// isTrack = true;
}
else
{
if (!trackwin.IsEmpty) //Rectangle is a value type, so the original null check was always true
{
hsv = nextframe.Convert<Hsv, Byte>();
Emgu.CV.CvInvoke.cvInRangeS(hsv, new MCvScalar(0, 30, 10, 0), new MCvScalar(180, 256, 256, 0), mask);
Emgu.CV.CvInvoke.cvSplit(hsv, hue, IntPtr.Zero, IntPtr.Zero, IntPtr.Zero);
picMask.Image = mask.ToBitmap();
picHue.Image = hue.ToBitmap();
}
img = new IntPtr[1]
{
hue
};
Emgu.CV.CvInvoke.cvCalcBackProject(img, backproject, hist);
Emgu.CV.CvInvoke.cvAnd(backproject, mask, backproject, IntPtr.Zero);
Image<Gray, Byte> grayframe = nextframe.Convert<Gray, Byte>();
grayframe._EqualizeHist();
var faces = grayframe.DetectHaarCascade(haar, 1.4, 4, HAAR_DETECTION_TYPE.FIND_BIGGEST_OBJECT | HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(40, 40))[0];
foreach (var face in faces)
{
nextframe.Draw(face.rect, new Bgr(Color.Black), 2);
}
if (trackwin.Width == 0) trackwin.Width = 40;
if (trackwin.Height == 0) trackwin.Height = 40;
Emgu.CV.CvInvoke.cvCamShift(backproject, trackwin, new MCvTermCriteria(10, 0.1), out trackcomp, out trackbox);
trackwin = trackcomp.rect;
// CvInvoke.cvEllipseBox(nextframe, trackbox, new MCvScalar(0, 255, 0), 2, LINE_TYPE.CV_AA, 0);
nextframe.Draw(trackwin, new Bgr(Color.Blue), 3);
CapImg.Image = nextframe.ToBitmap();
faceS = nextframe.Copy(trackwin);
picFace.Image = faceS.ToBitmap();
}
}
}
//......... some code omitted here .........