This page collects typical usage examples of the C# method Image._Not. If you are wondering what Image._Not does, how to use it, or where to find examples, the curated samples below should help. You can also read further about the Image class the method belongs to.
14 code examples of Image._Not are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code samples.
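
Before the examples, a minimal sketch of the method itself: _Not inverts an image in place, turning every byte value b into 255 - b. This is an illustration only, assuming the Emgu CV 2.x API most of the samples below use; the file names are placeholders.

using Emgu.CV;
using Emgu.CV.Structure;

class NotDemo
{
    static void Main()
    {
        // Load an image as grayscale (placeholder path), invert it in place, save it.
        Image<Gray, byte> img = new Image<Gray, byte>("input.jpg");
        img._Not(); // in-place bitwise inversion: white becomes black and vice versa
        img.Save("inverted.jpg");
    }
}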
Example 1: Work

public void Work(string database, string source)
{
    m_image = new Image<Bgr, byte>(source);
    if (m_image == null) // note: the Image constructor throws on failure, so this check never fires
    {
        Log("invalid source " + source);
        return;
    }
    Log("Scanning files " + database);
    string[] files = System.IO.Directory.GetFiles(database, "*.jpg");
    foreach (var current in files)
    {
        Load(current);
    }
    Log("Scanning done");
    m_rows = m_image.Height / m_sizey;
    m_cols = m_image.Width / m_sizex;
    m_used = 0;
    m_mask = new Image<Gray, Byte>(m_sizex, m_sizey);
    float halfx = m_sizex / 2;
    float halfy = m_sizey / 2;
    Ellipse ellipse = new Ellipse(new PointF(halfx, halfy), new SizeF((float)m_sizex * 0.95f, (float)m_sizey * 0.95f), 90.0f);
    m_mask.Draw(ellipse, new Gray(255), -1);
    m_mask._SmoothGaussian(15);
    m_mask = m_mask * 0.90;
    //ImageViewer.Show(m_mask, "Mask");
    //m_mask = new Image<Gray, Byte>(m_sizex, m_sizey);
    //float sx = (int)(m_sizex * 0.9f);
    //float sy = (int)(m_sizey * 0.9f);
    //Rectangle rect = new Rectangle((int)(m_sizex - sx) / 2, (int)(m_sizey - sy) / 2, (int)sx, (int)sy);
    //m_mask.Draw(rect, new Gray(255), -1);
    //m_mask._SmoothGaussian(15);
    //m_mask = m_mask * 0.75;
    //ImageViewer.Show(m_mask, "Mask");
    m_mask._Not();
    m_image._SmoothGaussian(51);
    Log("Rows: " + m_rows + " Cols: " + m_cols + " Size[" + m_sizex + "," + m_sizey + "]");
    for (int r = 0; r != m_rows; ++r)
    {
        for (int c = 0; c != m_cols; ++c)
            ProcessCell(r, c);
        Log("R: " + r);
    }
    m_image.ROI = new Rectangle(0, 0, m_cols * m_sizex, m_rows * m_sizey);
    m_image.Save("dump.jpg");
}
Example 2: FindPattern

public override bool FindPattern(Image<Gray, byte> img, out PointF[] image_points) {
    image_points = null;
    if (_marker == null) {
        return false;
    }
    Contour<Point> contour_points;
    // Find contour points in the black/white image.
    using (Image<Gray, byte> binary = new Image<Gray, byte>(img.Size)) {
        CvInvoke.cvThreshold(img, binary, _binary_threshold, 255, THRESH.CV_THRESH_BINARY | THRESH.CV_THRESH_OTSU);
        binary._Not(); // Contours are searched on white points; the marker envelope is black.
        contour_points = binary.FindContours();
    }
    lock (_sync) { // Lock here in case the pattern is changed while using it.
        bool marker_found = false;
        double best_error = _max_error_normed;
        while (contour_points != null) {
            // Approximate contour points by poly-lines.
            // For our marker envelope this should yield a poly-line of four vertices.
            Contour<Point> c = contour_points.ApproxPoly(contour_points.Perimeter * 0.05, _contour_storage);
            if (c.Total == 4 && c.Perimeter > 200) {
                // Warp the content of the poly-line as if looking at it from the top.
                PointF[] warp_source = new PointF[] {
                    new PointF(c[0].X, c[0].Y),
                    new PointF(c[1].X, c[1].Y),
                    new PointF(c[2].X, c[2].Y),
                    new PointF(c[3].X, c[3].Y)
                };
                CvInvoke.cvGetPerspectiveTransform(warp_source, _warp_dest, _warp_matrix);
                CvInvoke.cvWarpPerspective(
                    img, _warped, _warp_matrix,
                    (int)INTER.CV_INTER_CUBIC + (int)WARP.CV_WARP_FILL_OUTLIERS,
                    new MCvScalar(0)
                );
                CvInvoke.cvThreshold(_warped, _warped, _binary_threshold, 255, THRESH.CV_THRESH_BINARY | THRESH.CV_THRESH_OTSU);
                // Perform template matching against the stored pattern to determine
                // whether the content of the envelope matches it, and in which
                // orientation the pattern appears in the image.
                // Orientation is encoded as
                // 0: 0°, 1: 90°, 2: 180°, 3: 270°
                double error;
                int orientation;
                MatchPattern(out error, out orientation);
                if (error < best_error) {
                    best_error = error;
                    int id_0 = orientation;
                    int id_1 = (orientation + 1) % 4;
                    int id_2 = (orientation + 2) % 4;
                    int id_3 = (orientation + 3) % 4;
                    // The ids above are still ordered counterclockwise; we need to permute them:
                    //   0   3      0   1
                    //   +---+      +---+
                    //   |   |  ->  |   |
                    //   +---+      +---+
                    //   1   2      2   3
                    image_points = new PointF[4];
                    image_points[0] = c[id_0];
                    image_points[1] = c[id_3];
                    image_points[2] = c[id_1];
                    image_points[3] = c[id_2];
                    marker_found = true;
                }
            }
            contour_points = contour_points.HNext;
        }
        return marker_found;
    }
}
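
The four assignments at the end implement a fixed permutation from the counterclockwise corner order to row-major order (top-left, top-right, bottom-left, bottom-right). The same mapping written as a lookup table, purely as an illustrative sketch, not from the original source:

// Hypothetical rewrite of the assignment block above:
int[] rowMajorOrder = { id_0, id_3, id_1, id_2 }; // TL, TR, BL, BR
image_points = new PointF[4];
for (int i = 0; i < 4; i++)
    image_points[i] = c[rowMajorOrder[i]]; // Point converts implicitly to PointF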
Example 3: TestDistanceTransform

public void TestDistanceTransform()
{
    Image<Gray, Byte> img = new Image<Gray, byte>(480, 320);
    img.Draw(new Rectangle(200, 100, 160, 90), new Gray(255), 1);
    img._Not(); // invert: the distance transform measures distance to the nearest zero pixel
    Image<Gray, Single> dst = new Image<Gray, Single>(img.Size);
    CvInvoke.cvDistTransform(img, dst, Emgu.CV.CvEnum.DIST_TYPE.CV_DIST_L2, 3, null, IntPtr.Zero);
}
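
The _Not() call in this test matters because cvDistTransform computes, for every pixel, the distance to the nearest zero pixel, so the white rectangle outline has to become black first. A hedged check of the result (the indices follow from the rectangle drawn above; the 3x3 mask makes the L2 distances approximate):

// Center of Rectangle(200, 100, 160, 90) is (x = 280, y = 145); the nearest
// outline point is 45 px away on the top or bottom edge, so dst should hold
// roughly 45 there. Data is indexed [row, column, channel].
float centerDistance = dst.Data[145, 280, 0];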
Example 4: FindStopSign

private void FindStopSign(Image<Bgr, byte> img, List<Image<Gray, Byte>> stopSignList, List<Rectangle> boxList, Contour<Point> contours)
{
    for (; contours != null; contours = contours.HNext)
    {
        contours.ApproxPoly(contours.Perimeter * 0.02, 0, contours.Storage);
        if (contours.Area > 200)
        {
            double ratio = CvInvoke.cvMatchShapes(_octagon, contours, Emgu.CV.CvEnum.CONTOURS_MATCH_TYPE.CV_CONTOURS_MATCH_I3, 0);
            if (ratio > 0.1) // not a good match of contour shape
            {
                Contour<Point> child = contours.VNext;
                if (child != null)
                    FindStopSign(img, stopSignList, boxList, child);
                continue;
            }
            Rectangle box = contours.BoundingRectangle;
            Image<Gray, Byte> candidate;
            using (Image<Bgr, Byte> tmp = img.Copy(box))
                candidate = tmp.Convert<Gray, byte>();
            // Set the value of pixels not in the contour region to zero.
            using (Image<Gray, Byte> mask = new Image<Gray, byte>(box.Size))
            {
                mask.Draw(contours, new Gray(255), new Gray(255), 0, -1, new Point(-box.X, -box.Y));
                double mean = CvInvoke.cvAvg(candidate, mask).v0;
                candidate._ThresholdBinary(new Gray(mean), new Gray(255.0));
                candidate._Not();
                mask._Not();
                candidate.SetValue(0, mask);
            }
            SURFFeature[] features = candidate.ExtractSURF(ref _surfParam);
            SURFTracker.MatchedSURFFeature[] matchedFeatures = _tracker.MatchFeature(features, 2, 20);
            int goodMatchCount = 0;
            foreach (SURFTracker.MatchedSURFFeature ms in matchedFeatures)
                if (ms.SimilarFeatures[0].Distance < 0.5) goodMatchCount++;
            if (goodMatchCount >= 10)
            {
                boxList.Add(box);
                stopSignList.Add(candidate);
            }
        }
    }
}
Example 5: ProcessImage

public void ProcessImage(Emgu.CV.Image<Emgu.CV.Structure.Bgr, byte> image) {
    Emgu.CV.Image<Gray, byte> gray = image.Convert<Gray, byte>();
    Emgu.CV.Image<Gray, byte> binary = new Image<Gray, byte>(image.Size);
    CvInvoke.cvThreshold(gray, binary, 40, 255, THRESH.CV_THRESH_BINARY | THRESH.CV_THRESH_OTSU);
    binary._Not();
    Emgu.CV.Contour<System.Drawing.Point> contour_points = binary.FindContours();
    MemStorage storage = new MemStorage();
    Matrix<double> warp = new Matrix<double>(3, 3);
    while (contour_points != null) {
        Contour<Point> c = contour_points.ApproxPoly(contour_points.Perimeter * 0.05, storage);
        double p = c.Perimeter;
        if (c.Total == 4 && p > 300) {
            PointF[] src = new PointF[] {
                new PointF(c[0].X, c[0].Y),
                new PointF(c[1].X, c[1].Y),
                new PointF(c[2].X, c[2].Y),
                new PointF(c[3].X, c[3].Y)};
            CvInvoke.cvGetPerspectiveTransform(src, _dest, warp);
            int flags = (int)INTER.CV_INTER_LINEAR + (int)WARP.CV_WARP_FILL_OUTLIERS;
            CvInvoke.cvWarpPerspective(gray, _roi, warp, flags, new MCvScalar(0));
            double min_error;
            Orientation orient;
            FindBestOrientation(out min_error, out orient);
            if (min_error < 0.4) {
                image.DrawPolyline(c.ToArray(), true, new Bgr(Color.Green), 2);
                System.Console.WriteLine(min_error + " " + orient);
                // Draw one edge of the quad to indicate the detected orientation.
                switch (orient) {
                    case Orientation.Degrees0:
                        image.Draw(new LineSegment2D(c[0], c[3]), new Bgr(System.Drawing.Color.Red), 2);
                        break;
                    case Orientation.Degrees90:
                        image.Draw(new LineSegment2D(c[1], c[0]), new Bgr(System.Drawing.Color.Red), 2);
                        break;
                    case Orientation.Degrees180:
                        image.Draw(new LineSegment2D(c[2], c[1]), new Bgr(System.Drawing.Color.Red), 2);
                        break;
                    case Orientation.Degrees270:
                        image.Draw(new LineSegment2D(c[3], c[2]), new Bgr(System.Drawing.Color.Red), 2);
                        break;
                }
            }
        }
        contour_points = contour_points.HNext;
    }
}
Example 6: SetPixelsOutsideContourAreaToZero

private Image<Gray, Byte> SetPixelsOutsideContourAreaToZero(Image<Gray, Byte> contourImage, Rectangle contourBoundingRectangle, Contour<Point> contours)
{
    using (Image<Gray, Byte> mask = new Image<Gray, byte>(contourBoundingRectangle.Size))
    {
        mask.Draw(contours, new Gray(255), new Gray(255), 0, -1, new Point(-contourBoundingRectangle.X, -contourBoundingRectangle.Y));
        double mean = CvInvoke.cvAvg(contourImage, mask).v0;
        contourImage._ThresholdBinary(new Gray(mean), new Gray(255.0));
        contourImage._Not();
        mask._Not();                    // the mask now covers everything outside the contour
        contourImage.SetValue(0, mask); // zero those pixels
    }
    return contourImage;
}
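
A hypothetical usage sketch for this helper, assuming an Emgu CV 2.x grayFrame and a contour found in it (both names are placeholders):

Rectangle box = contour.BoundingRectangle;
using (Image<Gray, byte> cropped = grayFrame.Copy(box)) // crop to the bounding box
{
    // Binarize around the mean, invert, and zero everything outside the
    // contour; the helper mutates and returns `cropped` itself.
    Image<Gray, byte> candidate = SetPixelsOutsideContourAreaToZero(cropped, box, contour);
    candidate.Save("candidate.png"); // output name is a placeholder
}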
Example 7: FindStopSign

private void FindStopSign(Image<Bgr, byte> img, List<Image<Gray, Byte>> stopSignList, List<Rectangle> boxList, Contour<Point> contours)
{
    for (; contours != null; contours = contours.HNext)
    {
        // Mark the center of every contour's bounding box.
        imageGray.Draw(new CircleF(centerBox(contours.BoundingRectangle), 3), new Gray(150), 2);
        contours.ApproxPoly(contours.Perimeter * 0.02, 0, contours.Storage);
        if (contours.Area > 20)
        {
            double ratio = CvInvoke.cvMatchShapes(_octagon2, contours, Emgu.CV.CvEnum.CONTOURS_MATCH_TYPE.CV_CONTOURS_MATCH_I3, 0);
            if (ratio > 0.1) // not a good match of contour shape
            {
                Contour<Point> child = contours.VNext;
                if (child != null)
                    FindStopSign(img, stopSignList, boxList, child);
                continue;
            }
            Rectangle box = contours.BoundingRectangle;
            Image<Gray, Byte> candidate;
            using (Image<Bgr, Byte> tmp = img.Copy(box))
                candidate = tmp.Convert<Gray, byte>();
            // Set the value of pixels not in the contour region to zero.
            using (Image<Gray, Byte> mask = new Image<Gray, byte>(box.Size))
            {
                mask.Draw(contours, new Gray(255), new Gray(255), 0, -1, new Point(-box.X, -box.Y));
                double mean = CvInvoke.cvAvg(candidate, mask).v0;
                candidate._ThresholdBinary(new Gray(mean), new Gray(255.0));
                candidate._Not();
                mask._Not();
                candidate.SetValue(0, mask);
            }
            ImageFeature<float>[] features = _detector2.DetectFeatures(candidate, null);
            Features2DTracker<float>.MatchedImageFeature[] matchedFeatures = _tracker2.MatchFeature(features, 2);
            int goodMatchCount = 0;
            foreach (Features2DTracker<float>.MatchedImageFeature ms in matchedFeatures)
                if (ms.SimilarFeatures[0].Distance < 0.5) goodMatchCount++;
            if (goodMatchCount >= 10)
            {
                //imageGray.Draw(contours, new Gray(150), 2);
                imagecolor.Draw(contours, new Bgr(255, 0, 0), 2);
                areas.Add(contours.Area);
                boxList.Add(box);
                imageGray.Draw(contours.GetConvexHull(Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE), new Gray(150), 2);
                stopSignList.Add(candidate);
            }
        }
    }
}
Example 8: TestDistanceTransform

public void TestDistanceTransform()
{
    Image<Gray, Byte> img = new Image<Gray, byte>(480, 320);
    img.Draw(new Rectangle(200, 100, 160, 90), new Gray(255), 1);
    img._Not();
    Image<Gray, Single> dst = new Image<Gray, Single>(img.Size);
    // Same test as Example 3, written against the newer CvInvoke.DistanceTransform API.
    CvInvoke.DistanceTransform(img, dst, null, Emgu.CV.CvEnum.DistType.L2, 3);
}
Example 9: GenerateLogo

public Image<Bgra, byte> GenerateLogo(int width, int height = -1)
{
    int heightShift = 0;
    int textHeight = (int)(width / 160.0 * 72.0);
    if (height <= 0)
        height = textHeight;
    else
    {
        heightShift = Math.Max((height - textHeight) / 2, 0);
    }
    double scale = width / 160.0;
    Image<Bgr, Byte> semgu = new Image<Bgr, byte>(width, height, new Bgr(0, 0, 0));
    Image<Bgr, Byte> scv = new Image<Bgr, byte>(width, height, new Bgr(0, 0, 0));
    //MCvFont f1 = new MCvFont(CvEnum.FontFace.HersheyTriplex, 1.5 * scale, 1.5 * scale);
    //MCvFont f2 = new MCvFont(CvEnum.FontFace.HersheyComplex, 1.6 * scale, 2.2 * scale);
    semgu.Draw("Emgu", Point.Round(new PointF((float)(6 * scale), (float)(50 * scale + heightShift))), CvEnum.FontFace.HersheyTriplex, 1.5 * scale, new Bgr(55, 155, 255), (int)Math.Round(1.5 * scale));
    semgu._Dilate((int)(1 * scale));
    scv.Draw("CV", Point.Round(new PointF((float)(50 * scale), (float)(60 * scale + heightShift))), CvEnum.FontFace.HersheySimplex, 1.6 * scale, new Bgr(255, 55, 255), (int)Math.Round(2.2 * scale));
    scv._Dilate((int)(2 * scale));
    Image<Bgr, Byte> logoBgr = semgu.Or(scv);
    Image<Gray, Byte> logoA = new Image<Gray, byte>(logoBgr.Size);
    logoA.SetValue(255, logoBgr.Convert<Gray, Byte>());
    logoBgr._Not();
    logoA._Not();
    Image<Gray, Byte>[] channels = logoBgr.Split();
    channels = new Image<Gray, byte>[] { channels[0], channels[1], channels[2], new Image<Gray, Byte>(channels[0].Width, channels[0].Height, new Gray(255.0)) };
    Image<Bgra, Byte> logoBgra = new Image<Bgra, byte>(channels);
    logoBgra.SetValue(new Bgra(0.0, 0.0, 0.0, 0.0), logoA);
    //logoBgra.Save("EmguCVLogo.png");
    return logoBgra;
    /*
    Image<Bgr, Byte> bg_header = new Image<Bgr, byte>(1, 92);
    for (int i = 0; i < 92; i++)
        bg_header[i, 0] = new Bgr(210, 210 - i * 0.4, 210 - i * 0.9);
    bg_header.Save("bg_header.gif");
    */
}
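
Both GenerateLogo variants (see also Example 14) rely on the same channel-stacking trick to add transparency. Isolated below as a minimal sketch, assuming Emgu CV 2.x and a hypothetical Bgr image named bgr:

// Split into B, G, R planes, append a fully opaque alpha plane, and
// reassemble the four planes as a Bgra image.
Image<Gray, byte>[] ch = bgr.Split();
Image<Gray, byte> alpha = new Image<Gray, byte>(bgr.Width, bgr.Height, new Gray(255.0));
Image<Bgra, byte> bgra = new Image<Bgra, byte>(new[] { ch[0], ch[1], ch[2], alpha });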
Example 10: ProcessImage

public void ProcessImage(Image<Gray, byte> grayFrame)
{
    if (equalizeHist)
        grayFrame._EqualizeHist(); // autocontrast
    // smoothed
    Image<Gray, byte> smoothedGrayFrame = grayFrame.PyrDown();
    smoothedGrayFrame = smoothedGrayFrame.PyrUp();
    // canny
    Image<Gray, byte> cannyFrame = null;
    if (noiseFilter)
        cannyFrame = smoothedGrayFrame.Canny(new Gray(cannyThreshold),
            new Gray(cannyThreshold));
    // smoothing
    if (blur)
        grayFrame = smoothedGrayFrame;
    // binarize; the block-size expression always yields an odd value, as
    // cvAdaptiveThreshold requires (even b -> b + 0 + 1; odd b -> b + 1 + 1)
    CvInvoke.cvAdaptiveThreshold(grayFrame, grayFrame, 255,
        Emgu.CV.CvEnum.ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_MEAN_C,
        Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY,
        adaptiveThresholdBlockSize + adaptiveThresholdBlockSize % 2 + 1,
        adaptiveThresholdParameter);
    //
    grayFrame._Not();
    //
    if (addCanny)
        if (cannyFrame != null)
            grayFrame._Or(cannyFrame);
    //
    this.binarizedFrame = grayFrame;
    // dilate canny contours for filtering
    if (cannyFrame != null)
        cannyFrame = cannyFrame.Dilate(3);
    // find contours
    var sourceContours = grayFrame.FindContours(
        Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE,
        Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST);
    // filter contours
    contours = FilterContours(sourceContours, cannyFrame,
        grayFrame.Width, grayFrame.Height);
    // find templates
    lock (foundTemplates)
        foundTemplates.Clear();
    samples.Clear();
    lock (templates)
        Parallel.ForEach<Contour<Point>>(contours, (contour) =>
        {
            var arr = contour.ToArray();
            Template sample = new Template(arr, contour.Area, samples.templateSize);
            lock (samples)
                samples.Add(sample);
            if (!onlyFindContours)
            {
                FoundTemplateDesc desc = finder.FindTemplate(templates, sample);
                if (desc != null)
                    lock (foundTemplates)
                        foundTemplates.Add(desc);
            }
        });
    //
    FilterByIntersection(ref foundTemplates);
}
Example 11: FillMaskHoles

public void FillMaskHoles(string maskPath0, string maskPath1, string newImagePath, string incompleteImagePath, string resultPath)
{
    Image<Bgr, byte> mask0 = new Image<Bgr, byte>(maskPath0);
    Image<Bgr, byte> mask1 = new Image<Bgr, byte>(maskPath1);
    Image<Bgr, byte> new0 = new Image<Bgr, byte>(newImagePath);
    Image<Bgr, byte> incomplete = new Image<Bgr, byte>(incompleteImagePath);
    Image<Bgr, byte> resultMask = new Image<Bgr, byte>(new byte[288, 352, 3]);
    // New mask: the differences between the two input masks.
    for (int i = 0; i < mask0.Cols; i++)
    {
        for (int j = 0; j < mask0.Rows; j++)
        {
            if (mask0.Data[j, i, 0] == 0 && mask0.Data[j, i, 1] == 0 && mask0.Data[j, i, 2] == 0 &&
                mask1.Data[j, i, 0] == 255 && mask1.Data[j, i, 1] == 255 && mask1.Data[j, i, 2] == 255)
            {
                resultMask.Data[j, i, 0] = 0;
                resultMask.Data[j, i, 1] = 0;
                resultMask.Data[j, i, 2] = 0;
            }
            else
            {
                resultMask.Data[j, i, 0] = 255;
                resultMask.Data[j, i, 1] = 255;
                resultMask.Data[j, i, 2] = 255;
            }
        }
    }
    resultMask._Not();
    CvInvoke.cvCopy(new0, incomplete, resultMask);
    incomplete.Save(resultPath);
}
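
The per-pixel loop above can also be expressed with whole-image operations. A sketch under the assumption that both masks are strictly binary (every channel exactly 0 or 255); note that OpenCV documents cvCopy's mask as an 8-bit single-channel array, so a Gray mask is also a safer choice than the Bgr one used above:

// 255 exactly where mask0 is black AND mask1 is white - the same region the
// loop selects after its _Not() inversion - then copy new0 through that mask.
Image<Gray, byte> g0 = mask0.Convert<Gray, byte>();
Image<Gray, byte> g1 = mask1.Convert<Gray, byte>();
Image<Gray, byte> copyMask = g0.Not().And(g1);
CvInvoke.cvCopy(new0, incomplete, copyMask);
incomplete.Save(resultPath);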
Example 12: CopyEmptyAreasToBase

public BitmapImage CopyEmptyAreasToBase(BitmapImage bitmapImage0, BitmapImage bitmapImage1, BitmapImage bitmapImageMask)
{
    Image<Bgr, byte> image1 = new Image<Bgr, byte>(BitmapImageToBitmap(bitmapImage0));
    Image<Bgr, byte> image2 = new Image<Bgr, byte>(BitmapImageToBitmap(bitmapImage1));
    Image<Bgr, byte> mask = new Image<Bgr, byte>(BitmapImageToBitmap(bitmapImageMask));
    mask._Not(); // copy where the original mask was black
    CvInvoke.cvCopy(image2, image1, mask);
    return BitmapToBitmapImage(image1.Bitmap);
}
Example 13: ProcessImage

public void ProcessImage(Image<Gray, byte> grayFrame, bool enableMaxContour = false)
{
    if (equalizeHist)
        grayFrame._EqualizeHist(); // autocontrast
    // smoothed
    Image<Gray, byte> smoothedGrayFrame = grayFrame.PyrDown();
    smoothedGrayFrame = smoothedGrayFrame.PyrUp();
    // canny
    Image<Gray, byte> cannyFrame = null;
    if (noiseFilter)
        cannyFrame = smoothedGrayFrame.Canny(new Gray(cannyThreshold), new Gray(cannyThreshold));
    // smoothing
    if (blur)
        grayFrame = smoothedGrayFrame;
    // binarize
    CvInvoke.cvAdaptiveThreshold(grayFrame, grayFrame, 255, ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_MEAN_C, THRESH.CV_THRESH_BINARY,
        adaptiveThresholdBlockSize + adaptiveThresholdBlockSize % 2 + 1, adaptiveThresholdParameter);
    //
    grayFrame._Not();
    //
    if (addCanny)
        if (cannyFrame != null)
            grayFrame._Or(cannyFrame);
    //
    this.binarizedFrame = grayFrame;
    // dilate canny contours for filtering
    if (cannyFrame != null)
        cannyFrame = cannyFrame.Dilate(3);
    // find contours
    var sourceContours = grayFrame.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE, RETR_TYPE.CV_RETR_LIST);
    // filter contours
    contours = FilterContours(sourceContours, cannyFrame, grayFrame.Width, grayFrame.Height, enableMaxContour);
    //contours = ConvertContours(sourceContours);
    foundTemplates = FindTemplates(contours);
}
Example 14: GenerateLogo

public void GenerateLogo()
{
    Image<Bgr, Byte> semgu = new Image<Bgr, byte>(160, 72, new Bgr(0, 0, 0));
    Image<Bgr, Byte> scv = new Image<Bgr, byte>(160, 72, new Bgr(0, 0, 0));
    MCvFont f1 = new MCvFont(CvEnum.FONT.CV_FONT_HERSHEY_TRIPLEX, 1.5, 1.5);
    MCvFont f2 = new MCvFont(CvEnum.FONT.CV_FONT_HERSHEY_COMPLEX, 1.6, 2.2);
    semgu.Draw("Emgu", ref f1, new Point(6, 50), new Bgr(55, 155, 255));
    semgu._Dilate(1);
    scv.Draw("CV", ref f2, new Point(50, 60), new Bgr(255, 55, 255));
    scv._Dilate(2);
    Image<Bgr, Byte> logoBgr = semgu.Or(scv);
    Image<Gray, Byte> logoA = new Image<Gray, byte>(logoBgr.Width, logoBgr.Height);
    logoA.SetValue(255, logoBgr.Convert<Gray, Byte>());
    logoBgr._Not();
    logoA._Not();
    Image<Gray, Byte>[] channels = logoBgr.Split();
    channels = new Image<Gray, byte>[] { channels[0], channels[1], channels[2], new Image<Gray, Byte>(channels[0].Width, channels[0].Height, new Gray(255.0)) };
    Image<Bgra, Byte> logoBgra = new Image<Bgra, byte>(channels);
    logoBgra.SetValue(new Bgra(0.0, 0.0, 0.0, 0.0), logoA);
    logoBgra.Save("EmguCVLogo.gif");
    Image<Bgr, Byte> bg_header = new Image<Bgr, byte>(1, 92);
    for (int i = 0; i < 92; i++)
        bg_header[i, 0] = new Bgr(210, 210 - i * 0.4, 210 - i * 0.9);
    bg_header.Save("bg_header.gif");
}