本文整理汇总了C#中Image.SmoothBlur方法的典型用法代码示例。如果您正苦于以下问题:C# Image.SmoothBlur方法的具体用法?C# Image.SmoothBlur怎么用?C# Image.SmoothBlur使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Image的用法示例。
在下文中一共展示了Image.SmoothBlur方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: button1_Click
private void button1_Click(object sender, EventArgs e)
{
    // Find MSER keypoints near the centre of a user-selected grayscale image,
    // draw their centroid, and report "x:y:radius" in textBox1.

    // Margin (fraction of width/height) used to discard keypoints found
    // too close to the image border.
    float selectionMargin = 0.25f;
    List<Point> points = new List<Point>();
    int minArea = 1000;
    float maxSize = 0f;

    if (openFileDialog1.ShowDialog() != DialogResult.OK)
        return; // user cancelled - the original fell through with a null path and crashed

    // FileName already contains the full path; the original prepended
    // InitialDirectory, producing a broken path.
    String photoPath = openFileDialog1.FileName;

    using (Bitmap photo = new Bitmap(photoPath))
    using (Image<Gray, Byte> img = new Image<Gray, byte>(photo))
    {
        MSERDetector mserDetector = new MSERDetector(5, 1440000, minArea, 0.25f, 0.2f, 200, 1.01, 0.00003, 10);
        // NOTE(review): the blurred copy is never fed to the detector
        // (detection runs on `img`); kept to preserve the original behaviour.
        using (Image<Gray, Byte> img2 = img.SmoothBlur(30, 30, true))
        {
            MKeyPoint[] keyPoints = mserDetector.DetectKeyPoints(img, null);
            foreach (MKeyPoint p in keyPoints)
            {
                // Keep only keypoints inside the central region of the image.
                if (((p.Point.X / img.Width > selectionMargin) && (p.Point.X / img.Width < 1 - selectionMargin))
                    && ((p.Point.Y / img.Height > selectionMargin) && (p.Point.Y / img.Height < 1 - selectionMargin)))
                {
                    points.Add(new Point((int)(p.Point.X), (int)(p.Point.Y)));
                    if (p.Size > maxSize)
                        maxSize = p.Size;
                }
            }
        }

        if (points.Count == 0)
        {
            // No keypoint survived the margin filter; avoid dividing by zero.
            textBox1.Clear();
            return;
        }

        float sumX = 0f, sumY = 0f;
        foreach (Point p in points)
        {
            sumX += p.X; sumY += p.Y;
        }

        // Mark the centroid of the surviving keypoints.
        img.Draw(new CircleF(new PointF(sumX / points.Count, sumY / points.Count), 8), new Gray(), 10);
        textBox1.Clear();
        textBox1.AppendText((sumX / points.Count).ToString() + ":" + (sumY / points.Count).ToString() + ":" + (maxSize / 2).ToString());

        //Show the image using ImageViewer from Emgu.CV.UI
        pictureBox2.Image = img.ToBitmap();
        img.Save(@"C:\Users\Michał\Downloads\nowy.jpg");
    }
}
示例2: ProcessFrame
private Bitmap ProcessFrame(Bitmap bitmap)
{
    // Detect a bright (LED-like) blob: threshold near-white pixels, scan the
    // external contours for the largest one, flag the LED ON when one exists,
    // and log a timestamp on each OFF -> ON transition.
    Image<Bgr, Byte> frame = new Image<Bgr, byte>(bitmap);

    using (Image<Gray, Byte> smooth = frame.SmoothBlur(1, 1).Convert<Gray, Byte>())
    using (Image<Gray, Byte> threshold = smooth.ThresholdBinary(new Gray(240), new Gray(255)))
    using (Image<Gray, Byte> canny = threshold.Canny(new Gray(255), new Gray(255)))
    {
        Contour<Point> largestContour = null;
        double largestArea = 0;
        currLEDStatus = false;

        for (Contour<System.Drawing.Point> contours = canny.FindContours(
            Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
            Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL); contours != null; contours = contours.HNext)
        {
            if (contours.Area > largestArea)
            {
                largestArea = contours.Area;
                largestContour = contours;
            }
            canny.Draw(contours.BoundingRectangle, new Gray(255), 1);
        }

        // Draw the winner ONCE, after the scan. The original drew and logged
        // inside the loop, repainting every intermediate "largest" candidate
        // on each iteration.
        if (largestContour != null)
        {
            frame.Draw(largestContour.BoundingRectangle, new Bgr(Color.Red), 2);
            currLEDStatus = true;
        }
    }

    // Log a timestamp only when the LED turns on (OFF -> ON edge).
    if (currLEDStatus != prevLEDStatus && currLEDStatus)
    {
        string message = string.Format("{0:HH:mm:ss.fff}", DateTime.Now);
        Logger.Write(message, EnumLoggerType.Output);
    }
    prevLEDStatus = currLEDStatus;

    return frame.Bitmap;
}
示例3: Process
public void Process()
{
    // Run MSER keypoint detection on every queued bitmap and append one
    // Result (centroid X, centroid Y, radius) per bitmap to `results`.

    // Margin (fraction of width/height) excluding keypoints near the border.
    float selectionMargin = 0.25f;
    List<Point> points = new List<Point>();
    int minArea = 1000;

    foreach (Bitmap bitmap in bitmaps)
    {
        points.Clear();
        // Reset ALL per-image state. The original never reset maxSize, so the
        // largest keypoint of an earlier bitmap inflated later radii.
        float maxSize = 0f;
        float sumX = 0f, sumY = 0f;

        using (Image<Gray, Byte> img = new Image<Gray, byte>(bitmap))
        {
            MSERDetector mserDetector = new MSERDetector(5, 1440000, minArea, 0.25f, 0.2f, 200, 1.01, 0.00003, 10);
            // NOTE(review): the blurred copy is never fed to the detector
            // (detection runs on `img`); kept to preserve the original behaviour.
            using (Image<Gray, Byte> img2 = img.SmoothBlur(30, 30, true))
            {
                MKeyPoint[] keyPoints = mserDetector.DetectKeyPoints(img, null);
                foreach (MKeyPoint p in keyPoints)
                {
                    // Keep only keypoints inside the central region.
                    if (((p.Point.X / img.Width > selectionMargin) && (p.Point.X / img.Width < 1 - selectionMargin))
                        && ((p.Point.Y / img.Height > selectionMargin) && (p.Point.Y / img.Height < 1 - selectionMargin)))
                    {
                        points.Add(new Point((int)(p.Point.X), (int)(p.Point.Y)));
                        if (p.Size > maxSize)
                            maxSize = p.Size;
                    }
                }
            }

            foreach (Point p in points)
            {
                sumX += p.X; sumY += p.Y;
            }

            // Guard the empty case: 0f/0 would have produced a NaN centroid.
            if (points.Count > 0)
                results.Add(new Result(sumX / points.Count, sumY / points.Count, maxSize / 2));
            else
                results.Add(new Result(0f, 0f, 0f));
        }
    }
}
示例4: ApplyEffect
private BitmapSource ApplyEffect(EffectType effect, byte[] pixelData, System.Drawing.Bitmap bitmap, Image<Bgra, byte> ocvImage, List<Rectangle> effectRegions)
{
    // Copy raw pixel data into `bitmap`, apply the chosen effect to each
    // region of interest of the OpenCV image, and return the composited
    // result as a WPF BitmapSource.

    // lock the bitmap for writing
    BitmapData data = bitmap.LockBits(new Rectangle(0, 0, bitmap.Width, bitmap.Height),
        ImageLockMode.WriteOnly, bitmap.PixelFormat);
    // copy the data from pixelData to BitmapData
    Marshal.Copy(pixelData, 0, data.Scan0, pixelData.Length);
    // unlock the bitmap
    bitmap.UnlockBits(data);
    // assign the bitmap to the OpenCV image
    ocvImage.Bitmap = bitmap;

    if (effect != EffectType.None)
    {
        foreach (Rectangle effectRegion in effectRegions)
        {
            // set the Region of Interest based on the joint
            ocvImage.ROI = effectRegion;
            // temp image to hold effect output
            Image<Bgra, byte> ocvTempImg;
            switch (effect)
            {
                case EffectType.Blur:
                    ocvTempImg = ocvImage.SmoothBlur(20, 20);
                    break;
                case EffectType.Dilate:
                    ocvTempImg = ocvImage.Dilate(5);
                    break;
                case EffectType.Erode:
                    ocvTempImg = ocvImage.Erode(5);
                    break;
                case EffectType.Edge:
                    // Release the grayscale intermediates; the original leaked
                    // them for every region processed.
                    using (Image<Gray, byte> gray = ocvImage.Convert<Gray, byte>())
                    using (Image<Gray, byte> blurred = gray.SmoothBlur(3, 3))
                    using (Image<Gray, byte> edges = blurred.Canny(30.0f, 50.0f))
                    {
                        ocvTempImg = edges.Convert<Bgra, byte>();
                    }
                    break;
                default:
                    throw new ArgumentOutOfRangeException("effect");
            }
            // copy the effect area to the final image, then dispose the temp
            // (the original leaked one unmanaged image per region).
            using (ocvTempImg)
            {
                CvInvoke.cvCopy(ocvTempImg, ocvImage, IntPtr.Zero);
            }
        }
    }
    // reset the Region of Interest
    ocvImage.ROI = Rectangle.Empty;

    #region Convert System.Drawing.Bitmap to WPF BitmapSource
    // get a bitmap handle from the OpenCV image
    IntPtr hBitmap = ocvImage.ToBitmap().GetHbitmap();
    try
    {
        // convert that handle to a WPF BitmapSource
        return Imaging.CreateBitmapSourceFromHBitmap(hBitmap, IntPtr.Zero, Int32Rect.Empty,
            BitmapSizeOptions.FromWidthAndHeight(bitmap.Width, bitmap.Height));
    }
    finally
    {
        // GDI handles are not managed by the GC: release even on failure
        DeleteObject(hBitmap);
    }
    #endregion
}
示例5: createNoisemap
// ----- Noisemap erzeugen -----
private int[,] createNoisemap()
{
Console.WriteLine("Add Noise");
int initialSize = 128;
int size = initialSize;
Random rnd = new Random();
// Bilder erstellen
imageNoise = new Image<Gray, Byte>(imageWidth, imageHeight);
Image<Gray, Byte> imageTemp = new Image<Gray, Byte>(imageWidth, imageHeight);
int[,] currentNoiseMap = new int[imageHeight, imageWidth];
Image<Gray, Byte> currentNoiseImage = new Image<Gray, Byte>(imageWidth, imageHeight);
int[,] finalNoiseMap = new int[imageHeight, imageWidth];
Image<Gray, Byte> finalNoiseImage = new Image<Gray, Byte>(imageWidth, imageHeight);
// Zähler für Loop
int loop = 0;
// Mehrere Noise Bilder erstellen
while (size >= 1)
{
loop++;
for (int h = 0; h < imageHeight; h++)
{
for (int w = 0; w < imageWidth; w++)
{
// Zufallszahl 0-255 erzeugen
currentNoiseMap[h, w] = Convert.ToByte(rnd.Next(256));
//Console.WriteLine(noiseMap[h, w]);
// Grauwert berechnen
//imageTemp.Data[h, w, 0] = Convert.ToByte(noiseMap[h / size, w / size] / loop);
currentNoiseImage.Data[h, w, 0] = Convert.ToByte(currentNoiseMap[h / size, w / size] / loop);
}
}
// Noise-Bild glätten
currentNoiseImage = currentNoiseImage.SmoothBlur(size, size);
//currentNoiseImage.Save("../../noise" + size + ".png");
//Console.WriteLine(Math.Log(initialSize, 2) + 1);
// Werte addieren
for (int h = 0; h < imageHeight; h++)
{
for (int w = 0; w < imageWidth; w++)
{
finalNoiseMap[h, w] += currentNoiseImage.Data[h, w, 0];
}
}
// Bild zum Gesamt-Noise addieren
//imageNoise += imageTemp / (Math.Log(initialSize, 2) + 1);
//imageNoise += imageTemp / (Math.Log(initialSize, 2) + 1);
// Pixelgröße für nächsten Schleifendurchlauf halbieren
size /= 2;
}
//
int min = 999;
int max = 0;
for (int h = 0; h < imageHeight; h++)
{
for (int w = 0; w < imageWidth; w++)
{
finalNoiseMap[h, w] /= Convert.ToInt32(Math.Log(initialSize, 2) + 1);
//imageNoise.Data[h, w, 0] = (byte)finalNoiseMap[h, w];
//noiseMap[h, w] = finalNoiseMap[h, w];
if (max < finalNoiseMap[h, w]) { max = finalNoiseMap[h, w]; }
if (min > finalNoiseMap[h, w]) { min = finalNoiseMap[h, w]; }
}
}
// Werte für auf 0..255 normalisieren und speichern
double range = max - min;
for (int h = 0; h < imageHeight; h++)
{
for (int w = 0; w < imageWidth; w++)
{
// Normalisieren
finalNoiseMap[h, w] = Convert.ToInt32((double)(finalNoiseMap[h, w] - min) / range * 255);
// Bild erstellen
imageNoise.Data[h, w, 0] = (byte)finalNoiseMap[h, w];
}
}
//Console.WriteLine(min + " " + max);
//imageNoise.Save("noise.jpg");
//imageNoise.Save("../../noiseResult.png");
//ib_fog.Image = imageNoise;
//.........这里部分代码省略.........
示例6: SensorAllFramesReady
//.........这里部分代码省略.........
// DEMO 0: You
if (demo == 0 && skeleton != null && colourImage != null && depthImage != null)
{
displayImage = colourImage.Copy();
if (ShowDebug)
{
}
}
// DEMO 1: blur and boost
else if (demo == 1 && skeleton != null && colourImage != null
&& depthImage != null)
{
SkeletonPoint sleft = skeleton.Joints[JointType.HandLeft].Position;
SkeletonPoint sright = skeleton.Joints[JointType.HandRight].Position;
double hand_x_dist = Math.Abs(sleft.X - sright.Y);
double hand_y_dist = Math.Abs(sleft.Y - sright.Y);
// scale by 2 to speed up
displayImage = colourImage.Resize(0.5, INTER.CV_INTER_NN);
// displayImage = colourImage.Copy(); // slower
// boost the RGB values based on vertical hand distance
float boost = 3 - (float)(hand_y_dist * 5);
displayImage = colourImage.Convert(delegate(Byte b)
{ return (byte)((b * boost < 255) ? (b * boost) : 255); });
// blur based on horizontal hand distance
int blur = (int)(hand_x_dist * 20);
if (blur > 0)
displayImage = displayImage.SmoothBlur(blur, blur);
// show debug
if (ShowDebug)
{
debugImg2 = depthImage.Convert<Bgr, Byte>();
DepthImagePoint dp;
dp = sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(sleft,
sensor.DepthStream.Format);
debugImg2.Draw(new CircleF(dp.ToPointF(), 20), new Bgr(Color.Coral), 1);
dp = sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(sright,
sensor.DepthStream.Format);
debugImg2.Draw(new CircleF(dp.ToPointF(), 20), new Bgr(Color.LightGreen), 1);
Utilities.WriteDebugText(debugImg2, 10, 40, "{0:.00}m {1:0.00}m",
hand_x_dist, hand_y_dist);
}
}
// DEMO 2: Painting
else if (demo == 2 && skeleton != null &&
colourImage != null && depthImage != null)
{
// create a player mask for player we want
byte playerIndex = (byte)(Array.IndexOf(skeletons, skeleton) + 1);
//double[] min, max;
//Point[] pmin, pmax;
//playerMasks.MinMax(out min, out max, out pmin, out pmax);
// pick the player mask for the skeleton we're tracking
Image<Gray, Byte> playerMask = playerMasks.Convert(delegate(Byte b)
{ return (Byte)(b == playerIndex ? 255 : 0); });
示例7: button1_Click
private void button1_Click(object sender, EventArgs e)
{
    // Let the user pick an image, show a resized preview in imageBox1, and
    // store a grayscale copy (`img`) plus a blurred copy (`img1`) in fields
    // for later processing.
    openFileDialog1.Filter = "JPG Files (*.jpg)|*.jpg|JPEG Files (*.jpeg)|*.jpeg|PNG Files (*.png)|*.png|GIF Files (*.gif)|*.gif";
    if (openFileDialog1.ShowDialog() == DialogResult.OK)
    {
        //Load the image from file; dispose it once the derived images exist
        //(the original leaked the full-size BGR image on every click).
        using (Image<Bgr, Byte> img_bgr = new Image<Bgr, byte>(openFileDialog1.FileName))
        {
            imageBox1.Image = img_bgr.Resize(315, 266, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR);
            //Convert the image to grayscale and filter out the noise
            img = img_bgr.Convert<Gray, Byte>();
            // Blur the image (6x6 normalized box filter)
            img1 = img.SmoothBlur(6, 6, true);
        }
    }
}
示例8: preProcessImage
private static Image<Gray, byte> preProcessImage(
    Image<Gray, byte> gImage,
    int para1,
    int para2)
{
    // Pipeline: shrink -> blur -> binarize -> morphological open -> restore size.
    // Working at quarter resolution keeps the blur and morphology cheap; the
    // final resize brings the mask back to the original dimensions.

    // Quarter-size the input (nearest neighbour - we only need a mask).
    Image<Gray, byte> work = gImage.Resize(0.25, INTER.CV_INTER_NN);

    // Suppress noise with an 11x11 box blur before thresholding.
    work = work.SmoothBlur(11, 11);

    // Binarize: pixels brighter than para1 become white (255).
    work = work.ThresholdBinary(new Gray(para1), new Gray(255));

    // Opening with a 3 x para2 elliptical kernel removes small specks.
    StructuringElementEx kernel = new StructuringElementEx(
        3,
        (int)para2,
        1,
        (int)para2 / 2,
        Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_ELLIPSE);
    work = work.MorphologyEx(kernel, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_OPEN, 1);

    // Scale back up to the original resolution.
    return work.Resize(4, INTER.CV_INTER_NN);
}