This article collects typical code examples of the C# method Image.SmoothGaussian. If you have been struggling with questions such as: what exactly does C# Image.SmoothGaussian do, how is it used, and what do real examples look like, then the curated method examples here may help. You can also explore further usage examples of Image, the class
this method belongs to.
Fifteen code examples of Image.SmoothGaussian are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code examples.
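Before the examples, here is a minimal sketch of the two SmoothGaussian overloads used throughout this page. It assumes Emgu CV 2.x with the usual Emgu.CV and Emgu.CV.Structure usings, and "input.jpg" is a placeholder file name; kernel dimensions must be odd.
// Minimal usage sketch; "input.jpg" is a placeholder.
Image<Bgr, Byte> src = new Image<Bgr, Byte>("input.jpg");
// Square kernel; sigma is derived from the kernel size.
Image<Bgr, Byte> blur1 = src.SmoothGaussian(5);
// Explicit kernel width/height plus sigma in x and y.
Image<Bgr, Byte> blur2 = src.SmoothGaussian(5, 5, 1.5, 1.5);
// Note: SmoothGaussian returns a new image and leaves the source untouched.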
Example 1: ProcessColorImage
public override Image ProcessColorImage(Bitmap frame, ToteDetectionType detectionType, bool detectBins)
{
Image<Bgr, Byte> img = new Image<Bgr, Byte>(frame);
//// Get The Thresh Image With Given Values
//Image<Gray, byte> thresh = (threshData as BgrThreshData).InRange(img);
//// Pixelate Image
//threshData.Blur(ref thresh);
//
//
//Image ret = base.AnalyzeImage(thresh, detectionType, detectBins);
//frame.Dispose();
//thresh.Dispose();
img = img.SmoothMedian(11);
img = img.SmoothGaussian(11);
img = img.Erode(15);
img = img.Dilate(10);
// Try this: img.HoughCircles();
Image<Gray, byte> thresh = img.InRange(new Bgr(110, 130, 100), new Bgr(164, 166, 181));
Contour<Point> contour = thresh.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST);
List<Contour<Point>> PlayingBalls = new List<Contour<Point>>();
while (contour != null)
{
// keep only contours large enough to be a ball (convex-hulling them first is another option)
if (contour.Area > 50)
PlayingBalls.Add(contour);
contour = contour.HNext;
}
float resolutionOffset = ((float)thresh.Width * thresh.Height) / (640.0f * 480.0f);
foreach (Contour<Point> ball in PlayingBalls)
{
img.Draw(ball, new Bgr(255, 0, 0), 3);
// draw the remaining contours and their min enclosing circles on img (see the sketch after this example)
}
Image ret = img.ToBitmap();
img.Dispose();
return ret;
}
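The loop above leaves the min-enclosing-circle drawing as a comment. A hedged sketch of that step, assuming PointCollection.MinEnclosingCircle from Emgu CV 2.x:
// Hedged sketch: outline each candidate ball with its minimum enclosing circle.
foreach (Contour<Point> ball in PlayingBalls)
{
PointF[] pts = Array.ConvertAll(ball.ToArray(), p => new PointF(p.X, p.Y));
CircleF enclosing = PointCollection.MinEnclosingCircle(pts);
img.Draw(enclosing, new Bgr(0, 255, 0), 2); // green outline on the color image
}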
Example 2: DetectStopSign
public void DetectStopSign(Image<Bgr, byte> img, List<Image<Gray, Byte>> stopSignList, List<Rectangle> boxList)
{
Image<Bgr, Byte> smoothImg = img.SmoothGaussian(5, 5, 1.5, 1.5);
Image<Gray, Byte> smoothedRedMask = GetRedPixelMask(smoothImg);
smoothedRedMask._Dilate(1);
smoothedRedMask._Erode(1);
using (Image<Gray, Byte> canny = smoothedRedMask.Erode(3).Dilate(3).Canny(new Gray(100), new Gray(50)))
using (MemStorage stor = new MemStorage())
{
Contour<Point> contours = canny.FindContours(
Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE,
stor);
FindStopSign(img, stopSignList, boxList, contours);
}
}
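GetRedPixelMask is not included in this excerpt. A plausible reconstruction, modeled on the Emgu CV stop-sign sample (an assumption, not necessarily this author's helper): threshold the hue channel around red, which wraps at 0/180, and require a minimum saturation to drop near-white pixels.
private static Image<Gray, Byte> GetRedPixelMask(Image<Bgr, Byte> image)
{
using (Image<Hsv, Byte> hsv = image.Convert<Hsv, Byte>())
{
Image<Gray, Byte>[] channels = hsv.Split();
try
{
// hue outside [20, 160] is "red" (red wraps around 0/180)
CvInvoke.cvInRangeS(channels[0], new MCvScalar(20), new MCvScalar(160), channels[0]);
channels[0]._Not();
// require saturation >= 10 to filter out near-white pixels
channels[1]._ThresholdBinary(new Gray(10), new Gray(255));
CvInvoke.cvAnd(channels[0], channels[1], channels[0], IntPtr.Zero);
}
finally
{
channels[1].Dispose();
channels[2].Dispose();
}
return channels[0];
}
}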
Example 3: DetectImage
public void DetectImage(Image<Gray, byte> img, List<Image<Gray, Byte>> imgList, List<Rectangle> boxList)
{
Image<Gray, Byte> smoothImg = img.SmoothGaussian(5, 5, 1.5, 1.5);
// Use Dilate followed by Erode to eliminate small gaps in some contours.
smoothImg._Dilate(1);
smoothImg._Erode(1);
using (Image<Gray, Byte> canny = smoothImg.Canny(100, 50))
using (MemStorage stor = new MemStorage())
{
Contour<Point> contours = canny.FindContours(
Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE,
stor);
FindImage(img, imgList, boxList, contours);
}
}
Example 4: GetRegion
public static Rectangle GetRegion(string[] ColorsToLookFor, Image<Bgr, Byte> Frame)
{
ColorClassification ColorClassifier = new ColorClassification();
// Perform Edge Detection
Image<Gray, byte> grayFrame = Frame.Convert<Gray, byte>();
Task<bool[,]> detectEdges = new Task<bool[,]>(() =>
{
Image<Gray, Byte> cannyFrame = grayFrame.Canny(100, 60);
cannyFrame._Dilate(3); // use canny edge detection to determine object outlines
bool[,] BW_2 = BlobFinder.BW_Converter(cannyFrame);
return BW_2;
});
detectEdges.Start();
Frame._SmoothGaussian(25); // in-place blur; plain SmoothGaussian returns a new image, so calling it without assigning the result has no effect
var ColorFrame = Frame.Convert<Rgb, float>();
int[,] SelectedColors = ColorClassifier.SegmentColors(ColorFrame.Data);
bool[,] BW_FromColor = ColorClassifier.GenerateBW(ref SelectedColors, ColorsToLookFor);
Image<Gray, byte> BW_GrayImg = BlobFinder.Gray_Converter(ref BW_FromColor);
BW_GrayImg._Dilate(3);
BW_FromColor = BlobFinder.BW_Converter(BW_GrayImg);
// Combine objects found via color recognition and edge detection
// If an object is found with edge and color keep it, otherwise discard it
bool[,] EdgeBW = detectEdges.Result;
bool[,] BW_Composite = BlobFinder.AND(ref BW_FromColor, ref EdgeBW);
BlobFinder ImageBlobs = new BlobFinder(BW_Composite);
ImageBlobs.RemoveSmallBlobs(3000);
Blob bestBlob = ImageBlobs.PickBestBlob();
Rectangle myRect = new Rectangle();
if (bestBlob != null)
{
myRect = new Rectangle(
bestBlob.Xmin,
bestBlob.Ymin,
bestBlob.Xmax - bestBlob.Xmin + 1,
bestBlob.Ymax - bestBlob.Ymin + 1);
}
BlobFinder.DrawBlobOutline(Frame.Bitmap, myRect);
return myRect;
}
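BlobFinder is project-specific, so its AND helper is not shown. A hypothetical element-wise version, included only to make the edge/color compositing step concrete:
// Hypothetical BlobFinder.AND: keep a pixel only when both masks agree.
public static bool[,] AND(ref bool[,] a, ref bool[,] b)
{
int rows = a.GetLength(0), cols = a.GetLength(1);
bool[,] result = new bool[rows, cols];
for (int y = 0; y < rows; y++)
for (int x = 0; x < cols; x++)
result[y, x] = a[y, x] && b[y, x];
return result;
}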
Example 5: processFrameAndUpdateGUI
///////////////////////////////////////////////////////////////////////////////////////////
void processFrameAndUpdateGUI(object sender, EventArgs arg)
{
imgOriginal = capWebcam.QueryFrame(); // get next frame from the webcam
if (imgOriginal == null) // if we did not get a frame
{ // show error via message box
MessageBox.Show("unable to read from webcam" + Environment.NewLine + Environment.NewLine +
"exiting program");
Environment.Exit(0); // and exit program
}
imgBlurredBGR = imgOriginal.SmoothGaussian(5); // blur
imgProcessed = imgBlurredBGR.InRange(new Bgr(0, 0, 175), new Bgr(100, 100, 256)); // filter on color
imgProcessed = imgProcessed.SmoothGaussian(5); // blur again
StructuringElementEx structuringElementEx = new StructuringElementEx(5, 5, 1, 1, CV_ELEMENT_SHAPE.CV_SHAPE_RECT); // declare structuring element to use in dilate and erode
CvInvoke.cvDilate(imgProcessed, imgProcessed, structuringElementEx, 1); // close image (dilate, then erode)
CvInvoke.cvErode(imgProcessed, imgProcessed, structuringElementEx, 1); // closing "closes" (i.e. fills in) foreground gaps
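// HoughCircles arguments below (a reading of the Emgu CV 2.x overload, stated as an
// assumption): Canny threshold, accumulator threshold, dp (inverse resolution of the
// accumulator), minimum distance between circle centers, then min and max radius;
// the [0] indexer takes the circles found in the image's first (only) channel.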
CircleF[] circles = imgProcessed.HoughCircles(new Gray(100), new Gray(50), 2, imgProcessed.Height / 4, 10, 400)[0]; // fill variable circles with all circles in the processed image
foreach (CircleF circle in circles) // for each circle
{
if (txtXYRadius.Text != "") txtXYRadius.AppendText(Environment.NewLine); // if we are not on the first line in the text box then insert a new line char
txtXYRadius.AppendText("ball position = x " + circle.Center.X.ToString().PadLeft(4) + // print ball position and radius
", y = " + circle.Center.Y.ToString().PadLeft(4) + //
", radius = " + circle.Radius.ToString("###.000").PadLeft(7)); //
txtXYRadius.ScrollToCaret(); // scroll down in text box so most recent line added (at the bottom) will be shown
// draw a small green circle at the center of the detected object
CvInvoke.cvCircle(imgOriginal, new Point((int)circle.Center.X, (int)circle.Center.Y), 3, new MCvScalar(0, 255, 0), -1, LINE_TYPE.CV_AA, 0);
imgOriginal.Draw(circle, new Bgr(Color.Red), 3); // draw a red circle around the detected object
}
ibOriginal.Image = imgOriginal; // update image boxes on form
ibProcessed.Image = imgProcessed; //
}
Example 6: processFrameAndUpdateGUI
///////////////////////////////////////////////////////////////////////////////////////////
void processFrameAndUpdateGUI(object sender, EventArgs arg)
{
imgOriginal = capWebcam.QueryFrame(); // get next frame from the webcam
if (imgOriginal == null) // if we did not get a frame
{ // show error via message box
MessageBox.Show("unable to read from webcam" + Environment.NewLine + Environment.NewLine +
"exiting program");
Environment.Exit(0); // and exit program
}
imgGrayscale = imgOriginal.Convert<Gray, Byte>(); // convert to grayscale
imgBlurred = imgGrayscale.SmoothGaussian(5); // blur
double dblCannyThresh = 150.0; // declare params for call to Canny
double dblCannyThreshLinking = 75.0; //
imgCanny = imgBlurred.Canny(dblCannyThresh, dblCannyThreshLinking); // get Canny edges
ibOriginal.Image = imgOriginal; // update image boxes
ibCanny.Image = imgCanny; //
}
Example 7: start
public int start(Communicator comm)
{
int Vote = 0;
try
{
Vote = LoadAndProcessImage(".\\Humerus\\1.bmp",comm);
}
catch (Exception ex)
{
MessageBox.Show(ex.Message);
}
ibImage.SizeMode = PictureBoxSizeMode.Zoom;
imgGray = imgGray.AddWeighted(imgGray, 1.0, 0.0, 0.0);
imgGray = imgGray.ThresholdToZero(new Gray(100));
imgGray = imgGray.SmoothGaussian(9);
imgGray = imgGray.Canny(0, 80);
return Vote;
}
Example 8: LoadAndProcessImage
public int LoadAndProcessImage(string FileName, Communicator comm)
{
int Vote = 0;
imgOriginal = new Image<Bgr, Byte>(FileName);
imgGray = imgOriginal.Convert<Gray, Byte>();
//BitAnalysis.StartDye(0, 0, imgGray.Height, imgGray.Width, imgGray);
hcHumerus = new HaarCascade(".\\haarHumerus_03112013_4.8_18.xml");
ibImage.Image = imgBlank;
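// HaarCascade.Detect arguments (per the Emgu CV 2.x overload, stated here as an
// assumption): scale factor per pyramid step, minimum neighbor count to accept a
// detection, detection mode, and min/max window sizes (Size.Empty = unconstrained).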
acHumerus = hcHumerus.Detect(imgGray,
4.8,
18,
HAAR_DETECTION_TYPE.SCALE_IMAGE,
Size.Empty,
Size.Empty);
acHumerus1 = hcHumerus.Detect(imgGray,
4.8,
18,
HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
Size.Empty,
Size.Empty);
int count1 = 0, count2 = 0;
foreach (MCvAvgComp acHum in acHumerus)
{
StartDye(acHum.rect.X, acHum.rect.Y, acHum.rect.Width, acHum.rect.Height, imgGray,comm);
if (Flag) // to get the coordinates x, y and the width, height
{
imgOriginal.Draw(acHum.rect, new Bgr(Color.Blue), 2);
count1++;
Vote = 1;
}
imgGray.Draw(acHum.rect, new Gray(0.0), 1);
}
if (count1 == 0)
{
foreach (MCvAvgComp acHum1 in acHumerus1)
{
StartDye(acHum1.rect.X, acHum1.rect.Y, acHum1.rect.Width, acHum1.rect.Height, imgGray,comm);
if (Flag) // to get the coordinates x, y and the width, height
{
imgOriginal.Draw(acHum1.rect, new Bgr(Color.Red), 2);
count2++;
Vote = 1;
}
imgGray.Draw(acHum1.rect, new Gray(0.0), 1);
}
}
if (count1 == 0 && count2 == 0)
{
imgGray = imgGray.AddWeighted(imgGray, 1.0, 0.0, 0.0);
imgGray = imgGray.ThresholdToZero(new Gray(100));
imgGray = imgGray.SmoothGaussian(9);
imgGray = imgGray.Canny(0, 80);
hcHumerus = new HaarCascade(".\\HaarHumerus_03172013_2.8_3.xml");
acHumerus = hcHumerus.Detect(imgGray,
2.8,
3,
HAAR_DETECTION_TYPE.SCALE_IMAGE,
Size.Empty,
Size.Empty);
acHumerus1 = hcHumerus.Detect(imgGray,
2.8,
3,
HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
Size.Empty,
Size.Empty);
foreach (MCvAvgComp acHum in acHumerus)
{
StartDye(acHum.rect.X, acHum.rect.Y, acHum.rect.Width, acHum.rect.Height, imgGray,comm);
if (Flag) // to get the coordinates x, y and the width, height
{
imgOriginal.Draw(acHum.rect, new Bgr(Color.Orange), 2);
Vote = 1;
}
imgGray.Draw(acHum.rect, new Gray(0.0), 1);
}
foreach (MCvAvgComp acHum1 in acHumerus1)
{
StartDye(acHum1.rect.X, acHum1.rect.Y, acHum1.rect.Width, acHum1.rect.Height, imgGray,comm);
if (Flag) // to get the coordinates x, y and the width, height
{
imgOriginal.Draw(acHum1.rect, new Bgr(Color.Green), 2);
Vote = 1;
}
imgGray.Draw(acHum1.rect, new Gray(0.0), 1);
}
}
return Vote;
}
Example 9: DetectRect
public void DetectRect(Image<Bgr, byte> img, List<Image<Gray, Byte>> stopSignList, List<Rectangle> boxList, List<Contour<Point>> contourSignFound)
{
imagecolor = img;
joinContour.Clear();
Image<Bgr, Byte> smoothImg = img.SmoothGaussian(5, 5, 1.5, 1.5);
Image<Gray, Byte> smoothedBlackMask = GetColorPixelMask(smoothImg, 0, 180, 0, 94, 0, 100);
imageGray = smoothedBlackMask;
// Use Dilate followed by Erode to eliminate small gaps in some contours.
smoothedBlackMask._Dilate(1);
smoothedBlackMask._Erode(1);
using (Image<Gray, Byte> canny = smoothedBlackMask.Canny(new Gray(100), new Gray(50)))//Canny(100,50))
using (MemStorage stor = new MemStorage())
{
Contour<Point> contours = canny.FindContours(
Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE,
stor);
FindRect(img, stopSignList, boxList, contours, 5);
}
CvInvoke.cvAnd(imageGray, imageSelector, imageGray, IntPtr.Zero);
using (Image<Gray, Byte> cannySelector = imageSelector.Canny(new Gray(100), new Gray(50)))//Canny(100,50))
using (MemStorage stor = new MemStorage())
{
Contour<Point> contours = cannySelector.FindContours(
Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE,
stor);
imageGray.Draw(contours, new Gray(255), 1);
}
//imageGray.Draw(joinContour.GetMinAreaRect(),new Gray(180),1);
CvInvoke.cvShowImage("Image Black", imageGray);
PointF temp = new PointF();
MCvBox2D tempbox = new MCvBox2D();
bool swapped = false;
// bubble sort by Y so the four points end up in this layout:
//    0
//  1   2
//    3
do
{
swapped = false;
for (int i = 0; i < 3; i++)
{
if (pointBlack[i].Y > pointBlack[i + 1].Y)
{
temp = pointBlack[i];
tempbox = minBoxesBlack[i];
pointBlack[i] = pointBlack[i + 1];
minBoxesBlack[i] = minBoxesBlack[i + 1];
pointBlack[i + 1] = temp;
minBoxesBlack[i + 1] = tempbox;
swapped = true;
}
}
} while (swapped);
if (pointBlack[1].X > pointBlack[2].X)
{
temp = pointBlack[1];
tempbox = minBoxesBlack[1];
pointBlack[1] = pointBlack[2];
minBoxesBlack[1] = minBoxesBlack[2];
pointBlack[2] = temp;
minBoxesBlack[2] = tempbox;
}
MCvFont f = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_PLAIN, 0.8, 0.8);
//for (int i=0; i < 4; i++)
//{
// imageGray.Draw(" " + i, ref f, new Point((int)pointBlack[i].X, (int)pointBlack[i].Y), new Gray(200));
// imageGray.Draw(minBoxesBlack[i], new Gray(100), 2);
//}
LineSegment2DF[] lines = new LineSegment2DF[9];
lines[0] = new LineSegment2DF(pointBlack[0], pointBlack[3]);
lines[1] = new LineSegment2DF(pointBlack[1], pointBlack[2]);
lines[2] = translatationLineXNeg(lines[0], lines[1]);
lines[3] = translatationLineXPos(lines[0], lines[1]);
imageGray.Draw(lines[0], new Gray(100), 2);
imageGray.Draw(lines[1], new Gray(100), 2);
imageGray.Draw(lines[2], new Gray(100), 2);
imageGray.Draw(lines[3], new Gray(100), 2);
//areas.Clear();
Image<Gray, Byte> smoothedWhiteMask = GetColorPixelMask(smoothImg, 0, 180, 0, 94, 92, 255);
imageGray = smoothedWhiteMask;
// Use Dilate followed by Erode to eliminate small gaps in some contours.
smoothedWhiteMask._Dilate(1);
smoothedWhiteMask._Erode(1);
CvInvoke.cvAnd(smoothedWhiteMask, imageSelector, smoothedWhiteMask, IntPtr.Zero);
//......... part of the code omitted here .........
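GetColorPixelMask is another helper this excerpt omits. Judging from the two call sites above (a black mask with value 0-100 and a white mask with value 92-255), its parameters appear to be (image, hueMin, hueMax, satMin, satMax, valMin, valMax); a hypothetical HSV-range version for illustration:
// Hypothetical GetColorPixelMask; argument order inferred from the calls above.
private static Image<Gray, Byte> GetColorPixelMask(Image<Bgr, Byte> image,
double hueMin, double hueMax, double satMin, double satMax, double valMin, double valMax)
{
using (Image<Hsv, Byte> hsv = image.Convert<Hsv, Byte>())
{
return hsv.InRange(new Hsv(hueMin, satMin, valMin), new Hsv(hueMax, satMax, valMax));
}
}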
Example 10: create_init_img
private Image<Gray, float> create_init_img(Image<Gray, float> img, int imgDbl, double sigma)
{
Image<Gray, float> gray;
Image<Gray, float> dbl;
float sigDiff;
gray = convert_to_gray32(img);
if (imgDbl != 0)
{
sigDiff = (float) Math.Sqrt(sigma*sigma - SiftInitSigma*SiftInitSigma*4);
dbl = new Image<Gray, float>(new Size(img.Width*2, img.Height*2));
dbl = gray.Resize(dbl.Width, dbl.Height, INTER.CV_INTER_CUBIC);
dbl = dbl.SmoothGaussian(0, 0, sigDiff, sigDiff);
return dbl;
}
else
{
sigDiff = (float) Math.Sqrt(sigma*sigma - SiftInitSigma*SiftInitSigma);
gray = gray.SmoothGaussian(0, 0, sigDiff, sigDiff); // SmoothGaussian returns a new image; the result must be assigned
return gray;
}
}
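convert_to_gray32 and SiftInitSigma are defined elsewhere in this SIFT port. Going by Lowe's reference implementation (an assumption about this code), the constant is 0.5 and the conversion yields a float image rescaled to [0, 1]; a minimal stand-in:
// Assumed constant: the nominal blur of the input image in Lowe's SIFT.
private const float SiftInitSigma = 0.5f;
// Hypothetical convert_to_gray32: a 32-bit float copy rescaled to [0, 1],
// as the original C implementation of SIFT does.
private Image<Gray, float> convert_to_gray32(Image<Gray, float> img)
{
return img.ConvertScale<float>(1.0 / 255.0, 0);
}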
Example 11: ProcessFrame
private void ProcessFrame(object sender, EventArgs arg)
{
if (null == camera)
return;
using (Image<Bgr, Byte> ImageFrame = camera.QueryFrame()) //line 1
{
//stream = new MemoryStream();
if(null != ImageFrame)
{
Image<Gray, Byte> ImageGrey = ImageFrame.Convert<Gray, Byte>().PyrDown().PyrUp();
Image<Gray, float> grad_x = new Image<Gray, float>(ImageGrey.Size);
Image<Gray, float> grad_y = new Image<Gray, float>(ImageGrey.Size);
Image<Gray, Byte> grad_x_abs = new Image<Gray, Byte>(ImageGrey.Size);
Image<Gray, Byte> grad_y_abs = new Image<Gray, Byte>(ImageGrey.Size);
Image<Gray, Byte> grad = new Image<Gray, Byte>(ImageGrey.Size);
//System.Drawing.Bitmap pixels = ImageFrame.ToBitmap();
//System.Drawing.Bitmap pixels;
//Emgu.CV.Matrix<Int32> pixels, grey_pixels;
//var pixels = new Emgu.CV.Image<Rgba, Int32>(ImageFrame.ToBitmap());
//var pixels_grey = new Emgu.CV.Image<Gray, Int32>(ImageFrame.ToBitmap());
//Matrix<Int32> grad_x;
//Matrix<Int32> grad_y;
//Emgu.CV.CvInvoke.cvCvtColor(pixels, pixels_grey, COLOR_CONVERSION.CV_RGB2GRAY);
//Emgu.CV.CvInvoke.cvSobel(pixels_grey, (IntPtr)(grad_x), 1, 0, 3);
//Emgu.CV.CvInvoke.cvSobel(pixels_grey, grad_y, 0, 1, 3);
//CvInvoke.cvCanny(ImageGrey, ImageGrey, 10, 60, 3);
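// Approximate the gradient magnitude as 0.5*|d/dx| + 0.5*|d/dy| of the Sobel
// responses below; a cheap, common stand-in for the true Euclidean magnitude.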
CvInvoke.cvSobel(ImageGrey, grad_x, 1, 0, 3);
CvInvoke.cvConvertScaleAbs(grad_x, grad_x_abs, 1, 0);
CvInvoke.cvSobel(ImageGrey, grad_y, 0, 1, 3);
CvInvoke.cvConvertScaleAbs(grad_y, grad_y_abs, 1, 0);
CvInvoke.cvAddWeighted(grad_x_abs, 0.5, grad_y_abs, 0.5, 0, grad);
double mn = 0.0, mx = 0.0;
var mnpnt = new System.Drawing.Point();
var mxpnt = new System.Drawing.Point();
CvInvoke.cvMinMaxLoc(grad, ref mn, ref mx, ref mnpnt, ref mxpnt, IntPtr.Zero);
//CvInvoke.cvThreshold(grad, grad, mx - (mx - mn)/1.2 , 0, THRESH.CV_THRESH_TOZERO);
grad = grad.SmoothGaussian(17);
CvInvoke.cvAdaptiveThreshold(grad, grad, mx, ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_MEAN_C, THRESH.CV_THRESH_BINARY, 3, mn);
//Image<Gray, byte> grad_x = ImageGrey.Sobel(1, 0, 3);
//Image<Gray, byte> grad_y = ImageGrey.Sobel(0, 1, 3);
//ImageFrame.Bitmap.Save(stream, ImageFormat.Bmp);
//ImageGrey.Bitmap.Save(stream, ImageFormat.Bmp);
grad.Bitmap.Save(stream, ImageFormat.Bmp);
ImageSrc = new BitmapImage();
ImageSrc.BeginInit();
ImageSrc.StreamSource = new MemoryStream( stream.ToArray() ); //line 2
ImageSrc.EndInit();
ImageBox.Source = ImageSrc;
}
}
}
Example 12: piecesCheck2
private Image<Hls, Byte> piecesCheck2(Image<Bgr, Byte> img)
{
Image<Hls, Byte> result = new Image<Hls, byte>(img.Bitmap).PyrDown().PyrUp();
Game a = new Game(leftRadio.IsChecked.Value);
if (gaussian)
result = result.SmoothGaussian(gaussianValue);
if (contrast)
result._EqualizeHist();
//result[2] += saturation;
int countBlack;
int countWhite;
List<int> gameState = new List<int>();
for (int i = 0; i < 32; i++)
{
gameState.Add(2);
}
for (int i = 0; i < 32; i++)
{
int x = (int)boxList[i].center.X;
int y = (int)boxList[i].center.Y;
countWhite = 0;
countBlack = 0;
byte lightness = result.Data[y, x, 1]; // channel 1 of an Hls image is lightness
if (lightness > whiteLightness)
{
countWhite++;
gameState[i] = 0;
result.Draw(new CircleF(boxList[i].center, 3), new Hls(120, 50, 100), 3);
}
if (lightness < blackLightness)
{
countBlack++;
gameState[i] = 1;
result.Draw(new CircleF(boxList[i].center, 3), new Hls(220, 60, 100), 3);
}
}
previousGame = currentGame; // keep the prior state; assigning the fresh Game here would make both references alias the same object
a.updateStatus(gameState);
currentGame = a;
return result;
}
Example 13: GetFilteredImage
private Image<Gray, byte> GetFilteredImage(Image<Bgr, byte> image)
{
Image<Bgr, Byte> smoothedImage = image.SmoothGaussian(5, 5, 1.5, 1.5);
Image<Gray, Byte> smoothedRedMask = GetRedPixelMask(smoothedImage);
smoothedRedMask._Dilate(1);
smoothedRedMask._Erode(1);
//Image<Gray, Byte> cannyImage = smoothedRedMask.Erode(5).Dilate(5).Canny(new Gray(100), new Gray(50));
Image<Gray, Byte> cannyImage = smoothedRedMask.Erode(5).Dilate(5).Canny(100, 50);
return cannyImage;
}
Example 14: StartDye
public bool StartDye(int X, int Y, int Heigh, int Width, Image<Gray, Byte> ImgGray)
{
heigh = Heigh;
width = Width;
Start_Pixel_x = X;
Start_Pixel_y = Y;
imgGray = ImgGray;
if (width <= 24) // eliminate noise from tiny detections (empirical cutoffs tried: 67, 24)
{ return false; }
//ArrayList Set_Pixel_In_Row = new ArrayList(); // Array for college
imgGray = imgGray.AddWeighted(imgGray, 1.0, 0.0, 0.0);
imgGray = imgGray.ThresholdToZero(new Gray(100));
imgGray = imgGray.SmoothGaussian(9);
imgGray = imgGray.Canny(0, 80);
bitmap = new Bitmap(imgGray.ToBitmap());
Initial_Dye_Pixel(0);
return Flag;
}
Example 15: Run
public cFeedBackMessage Run()
{
if (base.Start() == false)
{
base.FeedBackMessage.IsSucceed = false;
return base.FeedBackMessage;
}
object _firstValue = base.ListProperties.FindByName("Kernel Size");
int KernelSize = 0;
if (_firstValue == null)
{
base.GenerateError("Kernel Size not found !");
return base.FeedBackMessage;
}
try
{
cProperty TmpProp = (cProperty)_firstValue;
KernelSize = (int)TmpProp.GetValue();
}
catch (Exception)
{
base.GenerateError("Kernel Size cast didn't work");
return base.FeedBackMessage;
}
//Matrix<float> Signature1 = new Matrix<float>(this.Count, 2);
//Matrix<float> Signature2 = new Matrix<float>(CompareTo.Count, 2);
//for (int Idx = 0; Idx < this.Count; Idx++)
//{
// Signature1[Idx, 0] = (float)this[Idx];
// Signature1[Idx, 1] = Idx;
// Signature2[Idx, 0] = (float)CompareTo[Idx];
// Signature2[Idx, 1] = Idx;
//}
//double ResutatEMD;
//ResutatEMD = CvInvoke.cvCalcEMD2(Signature1.Ptr, Signature2.Ptr, DIST_TYPE.CV_DIST_L1, null, IntPtr.Zero, IntPtr.Zero, IntPtr.Zero, IntPtr.Zero);
//Emgu.CV.Structure.MCvPoint2D64f
// IntPtr SrcImage = CvInvoke.cvCreateImage(
// Matrix<float> Src = new Matrix<float>(input.Data[inputBand].Data.ToArray());
// Matrix<float> Dst = new Matrix<float>(output.Data[inputBand].Data.ToArray());
// CvArray<IPL_DEPTH.IPL_DEPTH_32F> SRC = new
//IntPtr Src = CvInvoke.cvCreateImageHeader(new Size(input.Width, input.Height), IPL_DEPTH.IPL_DEPTH_32F, 1);
//Src = Marshal.UnsafeAddrOfPinnedArrayElement(input.Data[inputBand].Data, 0);
//CvInvoke.image
//ipl_image_p->imageData = my_float_image_data;
// ipl_image_p->imageDataOrigin = ipl_image_p->imageData;
base.Output = new cImage(base.Input, false);
for (int IdxChannel = 0; IdxChannel < base.ListChannelsToBeProcessed.Count; IdxChannel++)
{
int CurrentChannel = base.ListChannelsToBeProcessed[IdxChannel];
Image<Gray, float> inputImage = new Image<Gray, float>(Input.Width, Input.Height);
//float[,] SrcArray = new float[input.Width, input.Height];
for (int j = 0; j < Input.Height; j++)
for (int i = 0; i < Input.Width; i++)
{
inputImage.Data[j, i, 0] = Input.SingleChannelImage[CurrentChannel].Data[i + j * Input.Width];
}
Image<Gray, float> ProcessedImage = inputImage.SmoothGaussian(KernelSize); // the Gaussian-smoothed copy of this channel
this.Output.SingleChannelImage[IdxChannel].SetNewDataFromOpenCV(ProcessedImage);
// CvInvoke.cvSmooth(inputImage.Ptr, smoothedImage.Ptr, SMOOTH_TYPE.CV_MEDIAN, 5, 0, 0, 0);
//CvInvoke.cvSobel(inputImage.Ptr, smoothedImage.Ptr, 2, 2, 2);
//CvInvoke.cvSmooth(inputImage.Ptr, smoothedImage.Ptr, SMOOTH_TYPE.CV_GAUSSIAN,13, 13, 1.5, 1);
//smoothedImage = inputImage.Sobel(1, 0, 3);
}
// float[,] DestArray = new float[output.Width,output.Height];
// IntPtr MyintPtrDst = Marshal.UnsafeAddrOfPinnedArrayElement(DestArray, 0);
// CvInvoke.cvShowImage("Test", MyintPtr);
//CvInvoke.cvSmooth(MyintPtrSrc, MyintPtrDst, SMOOTH_TYPE.CV_GAUSSIAN, 0, 0, 3, 0);
//for (int j = 0; j < input.Height; j++)
// for (int i = 0; i < input.Width; i++)
// {
// output.Data[outputBand].Data[i + j * output.Width] = ;
// }
// IntPtr Dest = CvInvoke.cvCreateMat(output.Width, output.Height, MAT_DEPTH.CV_32F);
//Src.Width = input.Width;
//......... part of the code omitted here .........