本文整理汇总了C#中Image.ThresholdBinaryInv方法的典型用法代码示例。如果您正苦于以下问题:C# Image.ThresholdBinaryInv方法的具体用法?C# Image.ThresholdBinaryInv怎么用?C# Image.ThresholdBinaryInv使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Image
的用法示例。
在下文中一共展示了Image.ThresholdBinaryInv方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: FilterPlate
/// <summary>
/// Cleans up a licence-plate image for OCR. The plate is inverse-binarized
/// (dark pixels -> 255), then Canny contours taller than half the plate height
/// (typically the characters) are used to punch holes into a mask; cvSet then
/// zeroes the thresholded image wherever the mask is non-zero, keeping only
/// the tall-contour regions. A 1-iteration erode/dilate removes speckle noise.
/// </summary>
/// <param name="plate">Grayscale plate image.</param>
/// <returns>Filtered inverse-binary plate image.</returns>
private static Image<Gray, Byte> FilterPlate(Image<Gray, Byte> plate)
{
    Image<Gray, Byte> thresh = plate.ThresholdBinaryInv(new Gray(120), new Gray(255));
    Image<Gray, Byte> plateMask = new Image<Gray, byte>(plate.Size);
    Image<Gray, Byte> plateCanny = plate.Canny(new Gray(100), new Gray(50));
    // FIX: MemStorage is IDisposable and backed by native memory; the original
    // allocated it and followed with a bare brace block -- almost certainly a
    // dropped 'using'. Restore it so the contour storage is released
    // deterministically instead of leaking until finalization.
    using (MemStorage stor = new MemStorage())
    {
        plateMask.SetValue(255.0);
        // Walk the external contours of the Canny edge map (HNext = next sibling).
        for (
            Contour<Point> contours = plateCanny.FindContours(
                Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL,
                stor);
            contours != null; contours = contours.HNext)
        {
            Rectangle rect = contours.BoundingRectangle;
            // Contours spanning more than half the plate height are kept
            // (plate characters are typically this tall).
            if (rect.Height > (plate.Height >> 1))
            {
                // Grow by 1px on each side, clip to the image, and punch a
                // zero-valued hole into the mask.
                rect.X -= 1; rect.Y -= 1; rect.Width += 2; rect.Height += 2;
                rect.Intersect(plate.ROI);
                plateMask.Draw(rect, new Gray(0.0), -1); // -1 = filled rectangle
            }
        }
        // cvSet semantics: thresh(I) = 0 where plateMask(I) != 0, so only the
        // punched (mask == 0) regions survive.
        thresh.SetValue(0, plateMask);
    }
    // Morphological open (erode then dilate) to drop isolated noise pixels.
    thresh._Erode(1);
    thresh._Dilate(1);
    return thresh;
}
示例2: ProcessFrame
/// <summary>
/// Grabs a frame, isolates the region of interest, binarizes it and searches
/// for the rectangles the image model expects. When exactly the expected
/// number is found, the rectangles are handed to the model and the model's
/// inside rectangles are drawn; otherwise the raw detections are drawn.
/// </summary>
/// <param name="threshold">Gray-level cutoff for the inverse binary threshold.</param>
public void ProcessFrame(int threshold)
{
    m_OriginalImage = m_Capture.QueryFrame();
    m_ClippedImage = m_OriginalImage.Copy(this.RegionOfInterest);
    // Erode grows the dark regions so thin features survive thresholding.
    m_ErodedImage = m_ClippedImage.Erode(1);
    m_GrayImage = m_ErodedImage.Convert<Gray, Byte>();
    // Pixels darker than 'threshold' become white (255), the rest black.
    m_BlackAndWhiteImage = m_GrayImage.ThresholdBinaryInv(new Gray(threshold), new Gray(255));

    FindRectangles(m_BlackAndWhiteImage);
    this.FoundRectangleCount = m_FoundRectangles.Count;

    bool expectedCountFound = this.FoundRectangleCount == m_ImageModel.ExpectedRectangleCount;
    if (expectedCountFound)
    {
        m_ImageModel.AssignFoundRectangles(m_FoundRectangles);
    }
    m_FoundRectanglesImage = CreateRectanglesImage(
        expectedCountFound ? m_ImageModel.GetInsideRectangles() : m_FoundRectangles);
}
示例3: FindBlobs
/// <summary>
/// Segments blob-like regions in a color image. Per-channel Canny edges are
/// combined, a distance transform of the inverted edge map finds pixels within
/// 2px of an edge, and each unvisited pixel seeds a flood fill on the edge
/// image; components with area 500-2500 and bounding box 10-130px per side are
/// recolored via ReplaceColors.
/// </summary>
/// <param name="source">Input BGR image.</param>
/// <returns>Gray-converted edge image with qualifying blobs recolored.</returns>
public static Image<Bgr, byte> FindBlobs(Image<Bgr, byte> source)
{
//source._EqualizeHist();
var edges = new Image<Bgr, byte>(source.Width, source.Height);
// source.SmoothMedian(5);
// Run Canny independently on each of the three color channels.
for (int i = 0; i < 3; i++)
edges[i] = source[i].Canny(new Gray(100), new Gray(100));
var distTransformed = new Image<Gray, float>(source.Width, source.Height);
// Invert edges so the distance transform measures distance TO an edge pixel.
var grayEdges = edges.Convert<Gray, byte>().Not();
CvInvoke.cvDistTransform(grayEdges.Ptr, distTransformed.Ptr, DIST_TYPE.CV_DIST_L2, 3, new[] { 1f, 1f }, IntPtr.Zero);
// Pixels closer than 2px to an edge become 255; this seeds the fill mask.
var byteDist = distTransformed.ThresholdBinaryInv(new Gray(2), new Gray(255)).Convert<Gray, byte>();
//return byteDist.Convert<Bgr, byte>();
// cvFloodFill requires a mask 2px larger than the image; copy byteDist into
// its interior via a temporary ROI.
Image<Gray, byte> mask = new Image<Gray, byte>(byteDist.Width + 2, byteDist.Height + 2);
mask.ROI = new Rectangle(1,1,byteDist.Width, byteDist.Height);
CvInvoke.cvCopy(byteDist, mask, IntPtr.Zero);
mask.ROI = new Rectangle(0, 0, byteDist.Width+2, byteDist.Height+2);
edges = grayEdges.Convert<Bgr, byte>();
/* Flood fill every pixel not yet covered by the mask. */
for (int i = 0; i < edges.Width; i++)
{
for (int j = 0; j < edges.Height; j++)
{
// Image.Data is indexed [row, column, channel], hence [j, i, 0].
if (mask.Data[j, i, 0] == 0)
{
var comp = new MCvConnectedComp();
// Lo/Up diffs are all zero, so the fill only spreads across
// exactly-equal neighboring pixel values.
CvInvoke.cvFloodFill(
edges.Ptr,
new Point(i, j),
new MCvScalar(200, 200, 200, 0), // Color
new MCvScalar(0, 0, 0), // Lo
new MCvScalar(0, 0, 0), // Up
out comp,
Emgu.CV.CvEnum.CONNECTIVITY.EIGHT_CONNECTED,
Emgu.CV.CvEnum.FLOODFILL_FLAG.DEFAULT,
mask.Ptr
);
// Size gate: keep medium components only (area and bbox bounds).
if (comp.area > 500 && comp.area < 2500
&& comp.rect.Size.Height > 10 && comp.rect.Size.Height < 130
&& comp.rect.Size.Width > 10 && comp.rect.Size.Width < 130)
{
// NOTE(review): ReplaceColors is defined elsewhere -- presumably
// highlights the accepted blob's rectangle; confirm at call site.
ReplaceColors(edges, comp.rect);
}
}
}
}
//TrackBlobs(edges, source);
return edges;
}
示例4: ProcessFrame
/// <summary>
/// Captures a frame, binarizes the region of interest and tracks how much the
/// non-zero (dark) pixel count changed since the previous frame; a change
/// larger than c_ChangeThreshold updates the last-significant-change timestamp.
/// </summary>
public void ProcessFrame()
{
    m_OriginalImage = m_Capture.QueryFrame();
    m_ClippedImage = m_OriginalImage.Copy(this.RegionOfInterest);
    // Grayscale + inverse binary threshold: dark pixels become white (non-zero).
    m_GrayImage = m_ClippedImage.Convert<Gray, Byte>();
    m_BlackAndWhiteImage = m_GrayImage.ThresholdBinaryInv(new Gray(m_Threshold), new Gray(255));

    int previousCount = m_CountNonZero;
    int currentCount = m_BlackAndWhiteImage.CountNonzero()[0];
    m_DeltaNonZero = Math.Abs(previousCount - currentCount);
    if (m_DeltaNonZero > c_ChangeThreshold)
    {
        m_UtcLastSignificantChange = DateTime.UtcNow;
    }
    m_CountNonZero = currentCount;
}
示例5: GetBlobEvents
/// <summary>
/// Detects blob events in a color frame. Uses the same edge / distance-
/// transform / flood-fill pipeline as FindBlobs, but with 4-connectivity, and
/// instead of recoloring blobs it emits an ObjectIsFounded event for every
/// component that IsMouse accepts.
/// </summary>
/// <param name="source">Input BGR frame.</param>
/// <returns>One event per detected component; empty list if none found.</returns>
public static List<Event> GetBlobEvents(Image<Bgr, byte> source)
{
var events = new List<Event>();
//source._EqualizeHist();
var edges = new Image<Bgr, byte>(source.Width, source.Height);
// source.SmoothMedian(5);
// Per-channel Canny, combined into one BGR edge image.
for (int i = 0; i < 3; i++)
edges[i] = source[i].Canny(new Gray(100), new Gray(100));
var distTransformed = new Image<Gray, float>(source.Width, source.Height);
// Invert edges so the distance transform measures distance TO an edge.
var grayEdges = edges.Convert<Gray, byte>().Not();
CvInvoke.cvDistTransform(grayEdges.Ptr, distTransformed.Ptr, DIST_TYPE.CV_DIST_L2, 3, new[] { 1f, 1f }, IntPtr.Zero);
// Pixels within 2px of an edge seed the flood-fill mask.
var byteDist = distTransformed.ThresholdBinaryInv(new Gray(2), new Gray(255)).Convert<Gray, byte>();
//return byteDist.Convert<Bgr, byte>();
// cvFloodFill needs a mask 2px larger than the image on each axis; copy the
// seed mask into its interior through a temporary ROI.
Image<Gray, byte> mask = new Image<Gray, byte>(byteDist.Width + 2, byteDist.Height + 2);
mask.ROI = new Rectangle(1, 1, byteDist.Width, byteDist.Height);
CvInvoke.cvCopy(byteDist, mask, IntPtr.Zero);
mask.ROI = new Rectangle(0, 0, byteDist.Width + 2, byteDist.Height + 2);
edges = grayEdges.Convert<Bgr, byte>();
/* Flood fill every pixel not yet covered by the mask. */
for (int i = 0; i < edges.Width; i++)
{
for (int j = 0; j < edges.Height; j++)
{
// Image.Data is indexed [row, column, channel], hence [j, i, 0].
if (mask.Data[j, i, 0] == 0)
{
var comp = new MCvConnectedComp();
// Zero Lo/Up diffs: fill only spreads over exactly-equal pixels.
// Note: 4-connectivity here, unlike FindBlobs' 8-connectivity.
CvInvoke.cvFloodFill(
edges.Ptr,
new Point(i, j),
new MCvScalar(200, 200, 200, 0), // Color
new MCvScalar(0, 0, 0), // Lo
new MCvScalar(0, 0, 0), // Up
out comp,
CONNECTIVITY.FOUR_CONNECTED,
FLOODFILL_FLAG.DEFAULT,
mask.Ptr
);
// NOTE(review): IsMouse is defined elsewhere -- presumably a
// shape/size classifier for the tracked object; confirm its contract.
if (IsMouse(comp, source, mask))
{
events.Add(new Event(comp, Event.EventType.ObjectIsFounded));
}
}
}
}
return events;
}
示例6: ProcessFrame
/// <summary>
/// Captures a frame and pre-processes it (clip to ROI, erode, grayscale,
/// inverse binary threshold), then runs rectangle detection on the result.
/// The downstream steps (model assignment, drawing, game-piece recording)
/// that used to follow are currently disabled.
/// </summary>
/// <param name="threshold">Gray-level cutoff for the inverse binary threshold.</param>
public void ProcessFrame(int threshold)
{
    // Release the images from the previous frame before grabbing a new one.
    DisposeImages();
    m_OriginalImage = m_Capture.QueryFrame();
    m_ClippedImage = m_OriginalImage.Copy(this.RegionOfInterest);
    // Erode makes the dark portions bigger so thin features survive thresholding.
    m_ErodedImage = m_ClippedImage.Erode(1);
    m_GrayImage = m_ErodedImage.Convert<Gray, Byte>();
    // Pixels darker than 'threshold' become white (255), the rest black.
    m_BlackAndWhiteImage = m_GrayImage.ThresholdBinaryInv(new Gray(threshold), new Gray(255));
    // Detection is still invoked for its side effects; the returned rectangle
    // list is intentionally discarded while post-processing stays disabled.
    FindRectangles(m_BlackAndWhiteImage);
}
示例7: FrameGrabber
/// <summary>
/// Frame-grabber event handler: builds a motion-gated dark-pixel mask
/// (candidate hand silhouettes), steps every active hand tracker, and spawns
/// new trackers from detected hand contours until hand_detected hands are
/// being tracked. Also measures the per-frame elapsed time for the trackers.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="e">Event arguments (unused).</param>
void FrameGrabber(object sender, EventArgs e)
{
// Stopwatch measures the full pass; elapsed_time is fed to StartTracking.
sw.Start();
newImage = grabber.QueryFrame();
count++;
if (newImage != null)
{
current_image = newImage.Convert<Gray, byte>();
// Motion/foreground detector writes its result into tempImage.
detector.Process(newImage, tempImage);
// Binarize, thicken and despeckle the motion mask.
tempImage = tempImage.ThresholdBinary(thresholdValue, MaxValue);
tempImage = tempImage.Dilate(2);
tempImage = tempImage.SmoothMedian(3);
// Dark pixels of the grayscale frame, restricted to moving areas.
newImageG = current_image.ThresholdBinaryInv(new Gray(threshold), new Gray(255d));
newImageG = newImageG.And(tempImage);
newImageG = newImageG.Dilate(1);
// Step every tracker already in flight.
if (numberOfHands > 0)
{
int tt = numberOfHands;
for (int i = 0; i < tt; i++)
{
if (x[i] != null)
{
try
{
x[i].StartTracking(elapsed_time);
}
catch(Exception ex)
{
// Tracking lost: drop this tracker and remember its last center
// so a new detection near it can be re-acquired.
Console.WriteLine("lost traking : number of hands {0} & list x {1}", numberOfHands, x.Count);
int id = x[i].id;
hand_centers[id] = x[i].new_center_pt;
hand_centers.Remove(id);
// NOTE(review): RemoveAt(id) indexes the list by tracker id, not by
// loop position i -- verify ids always equal list positions, else
// this removes the wrong tracker (or throws).
x.RemoveAt(id);
--numberOfHands;
}
}
}
}
// Fewer hands tracked than expected: look for new hand contours.
if (numberOfHands < hand_detected)
{
detected_hand = HandDetection(newImageG);
if (detected_hand.Any())// any elements in the list
{
foreach (Contour<Point> h in detected_hand)
{
if (numberOfHands < hand_detected)
{
// Seed a new tracker at the stored center for this hand slot.
y = new HandTracking(current_image.Width, current_image.Height, hand_centers[numberOfHands]);
y.ExtractFeatures(h);
y.id = numberOfHands;
x.Add(y);
numberOfHands++;
}
else
Console.WriteLine("there is already 2 hands");
}
detected_hand.Clear();
}
}
sw.Stop();
elapsed_time = sw.Elapsed.TotalMilliseconds;
sw.Reset();
// Publish the raw frame and the segmented hand mask to the UI.
imageBoxSkin.Image = newImage;
imageBoxFrameGrabber.Image = newImageG;
}
}
示例8: ProcessFrame
/// <summary>
/// Captures and binarizes a frame under the shared lock, then runs blob
/// detection and drawing; the change notification is raised after the lock
/// is released.
/// </summary>
public void ProcessFrame()
{
lock (SyncObject)
{
m_OriginalImage = m_Capture.QueryFrame();
m_ClippedImage = m_OriginalImage.Copy(this.RegionOfInterest);
// Make the dark portions bigger so thin features survive thresholding
m_ErodedImage = m_ClippedImage.Erode(1);
//Convert the image to grayscale
m_GrayImage = m_ErodedImage.Convert<Gray, Byte>(); //.PyrDown().PyrUp();
// Pixels darker than m_Threshold become white (255), the rest black.
m_BlackAndWhiteImage = m_GrayImage.ThresholdBinaryInv(new Gray(m_Threshold), new Gray(255));
FindBlobsAndDraw(m_BlackAndWhiteImage);
}
// Raised outside the lock, so event handlers run without holding SyncObject.
RaiseChangedEvent();
}