本文整理汇总了C#中Image.PyrDown方法的典型用法代码示例。如果您正苦于以下问题:C# Image.PyrDown方法的具体用法?C# Image.PyrDown怎么用?C# Image.PyrDown使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Image
的用法示例。
在下文中一共展示了Image.PyrDown方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: Browse_Click
/// <summary>
/// Handles the Browse button click: lets the user pick an image file,
/// toggles the Run button based on whether the file exists, and shows a
/// quarter-resolution preview (two PyrDown passes) in the Shower control.
/// </summary>
private void Browse_Click(object sender, RoutedEventArgs e)
{
    OpenFileDialog dlg = new OpenFileDialog();
    System.Windows.Forms.DialogResult result = dlg.ShowDialog();
    if (result != System.Windows.Forms.DialogResult.OK)
        return; // user cancelled the dialog

    FileInfo info = new FileInfo(dlg.FileName);
    if (!info.Exists)
    {
        RunButton.IsEnabled = false;
        return;
    }

    RunButton.IsEnabled = true;
    PathBox.Text = info.FullName;
    try
    {
        Image<Bgr, Byte> table = new Image<Bgr, byte>(info.FullName);
        // Two PyrDown calls => preview at 1/4 of each dimension.
        ImageSource bitmap = TreeViz.ToBitmapSource(table.PyrDown().PyrDown());
        Shower.Source = bitmap;
    }
    catch (Exception)
    {
        // FIX: was `throw ex;`, which resets the stack trace; `throw;`
        // rethrows the original exception with its trace intact.
        throw;
    }
}
示例2: DetectSquares
/// <summary>
/// Detects rectangular regions in a grayscale image. The image is
/// denoised (pyramid down/up) and eroded, Canny edges are extracted, and
/// contours that approximate quadrilaterals above a minimum bounding-box
/// area are collected as non-overlapping rectangles.
/// </summary>
/// <param name="src">Grayscale source image; a processed copy is returned in the result.</param>
/// <param name="detectionWindow">Optional window name to display the edge image in; empty to skip.</param>
/// <returns>DetectionData holding the accepted bounding rectangles and the processed image.</returns>
public static DetectionData DetectSquares(Image<Gray, byte> src, string detectionWindow = "")
{
    // Suppress fine noise: one pyramid down/up pass blurs the image while
    // preserving its size. (FIX: the original wrapped this in a
    // `for (i < 1)` loop that ran exactly once — dead loop removed.)
    src = src.PyrDown();
    src = src.PyrUp();
    src = src.Erode(1);

    // Edge detection; very high threshold with low linking keeps only
    // strong, well-connected edges.
    Gray cannyThreshold = new Gray(255);
    Gray cannyThresholdLinking = new Gray(1);
    Image<Gray, Byte> cannyEdges = src.Canny(cannyThreshold.Intensity, cannyThresholdLinking.Intensity, 3);

    // FIX: the original also ran HoughLinesBinary here and discarded the
    // result; that dead computation has been removed.

    List<Rectangle> rectanglesList = new List<Rectangle>();
    using (var storage = new MemStorage())
    {
        for (Contour<Point> contours = cannyEdges.FindContours(); contours != null; contours = contours.HNext)
        {
            // Simplify each contour to a polygon (5% perimeter tolerance).
            Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.05, storage);
            // Only consider contours whose bounding-box area exceeds 50.
            // (FIX: the original comment claimed 250; the code says 50.)
            if (currentContour.BoundingRectangle.Height * currentContour.BoundingRectangle.Width > 50)
            {
                // Require at least 4 vertices (quadrilateral-like shape).
                if (currentContour.Total >= 4)
                {
                    var boundingRectangle = currentContour.BoundingRectangle;
                    // Keep only rectangles that do not overlap one already kept.
                    if (!rectanglesList.Exists(rect => rect.IntersectsWith(boundingRectangle)))
                        rectanglesList.Add(boundingRectangle);
                }
            }
        }
    }
    ShowInNamedWindow(cannyEdges, detectionWindow);
    return new DetectionData(rectanglesList, src);
}
示例3: Segm_Process
/// <summary>
/// Segmentation pipeline: converts imgOriginal to grayscale, optionally
/// equalizes the histogram, denoises via PyrDown/PyrUp, applies an
/// adaptive threshold, inverts, optionally ORs in Canny edges, then
/// extracts and filters contours. Results go to the `contours` field and
/// the ibOriginal image box.
/// </summary>
void Segm_Process()
{
    // Convert the image to grayscale. (comment translated from Russian)
    imgProcessed = imgOriginal.Convert<Gray, Byte>();
    // Auto-contrast (histogram equalization).
    if (equalizeHist)
        imgProcessed._EqualizeHist();
    // Noise filter: pyramid down/up smooths while keeping the size.
    Image<Gray, byte> smoothedGrayFrame = imgProcessed.PyrDown();
    smoothedGrayFrame = smoothedGrayFrame.PyrUp();
    Image<Gray, byte> cannyFrame = null;
    // Contour (edge) search, only when the noise filter is enabled.
    if (noiseFilter)
        cannyFrame = smoothedGrayFrame.Canny(nfVal, nfVal);
    // Use the blurred frame as the working image if requested.
    if (blur)
        imgProcessed = smoothedGrayFrame;
    // Adaptive threshold. NOTE(review): the block size is the constant
    // expression 4 + 4 % 2 + 1 == 5; the sibling ProcessImage methods use
    // blockSize + blockSize % 2 + 1 with a configurable blockSize — this
    // may have been meant to be configurable too. Verify intent.
    CvInvoke.cvAdaptiveThreshold(imgProcessed, imgProcessed, 255, Emgu.CV.CvEnum.ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_MEAN_C, Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY, 4 + 4 % 2 + 1, thresVal);
    // Invert: white becomes black, so features become foreground.
    imgProcessed._Not();
    try
    {
        if (cannyFrame != null)
            imgProcessed._Or(cannyFrame);
    }
    // NOTE(review): silently swallows any _Or failure — presumably a
    // deliberate best-effort merge of the edge map; confirm before changing.
    catch { }
    // Dilate the Canny edges so they can serve as a filter mask below.
    if (cannyFrame != null)
        cannyFrame = cannyFrame.Dilate(3);
    // Contour search.
    var sourceContours = imgProcessed.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST);
    // Filter the contours.
    contours = FilterContours(sourceContours, cannyFrame, imgProcessed.Width, imgProcessed.Height);
    ibOriginal.Image = imgProcessed;
}
示例4: TestOclPyr
/// <summary>
/// Verifies that the OpenCL (UMat) PyrDown/PyrUp results match the CPU
/// Image&lt;,&gt; implementations: the per-pixel absolute difference must not
/// exceed 1. Skipped when OpenCL is unavailable.
/// </summary>
public void TestOclPyr()
{
    if (!CvInvoke.HaveOpenCL)
        return;

    // Build a 640x480 gray image filled with uniform random noise.
    Image<Gray, Byte> source = new Image<Gray, byte>(640, 480);
    source.SetRandUniform(new MCvScalar(), new MCvScalar(255, 255, 255));

    // CPU reference results.
    Image<Gray, Byte> cpuDown = source.PyrDown();
    Image<Gray, Byte> cpuUp = cpuDown.PyrUp();

    // OpenCL (UMat) results.
    UMat oclSource = source.ToUMat();
    UMat oclDown = new UMat();
    UMat oclUp = new UMat();
    CvInvoke.PyrDown(oclSource, oclDown);
    CvInvoke.PyrUp(oclDown, oclUp);

    // Overwrite the CPU images with |cpu - ocl| so MinMax yields the error.
    CvInvoke.AbsDiff(cpuDown, oclDown.ToImage<Gray, Byte>(), cpuDown);
    CvInvoke.AbsDiff(cpuUp, oclUp.ToImage<Gray, Byte>(), cpuUp);

    // Check the down-sampled diff first, then the up-sampled one,
    // matching the original order of operations.
    foreach (Image<Gray, Byte> diff in new[] { cpuDown, cpuUp })
    {
        double[] minVals, maxVals;
        Point[] minLocs, maxLocs;
        diff.MinMax(out minVals, out maxVals, out minLocs, out maxLocs);
        double worst = 0.0;
        foreach (double v in maxVals)
            if (v > worst)
                worst = v;
        Trace.WriteLine(String.Format("Max diff: {0}", worst));
        EmguAssert.IsTrue(worst <= 1.0);
    }
}
示例5: Run
/// <summary>
/// Loads the table image from <paramref name="filename"/>, downsamples it
/// to quarter resolution, locates the cards, finds all valid sets and
/// draws them on the image, then displays the result and reports the
/// elapsed time in the window title.
/// </summary>
/// <param name="filename">Path of the table image to analyze.</param>
/// <returns>The annotated, downsampled table image.</returns>
private Image<Bgr, Byte> Run(string filename)
{
    Stopwatch timer = Stopwatch.StartNew();

    ContourAnalyzer analyzer = new ContourAnalyzer();

    // Work at 1/4 resolution: two pyramid downsamples.
    Image<Bgr, Byte> table = new Image<Bgr, byte>(filename).PyrDown().PyrDown();

    Settings settings = new Settings(cmbDebuglevel.SelectedIndex);
    Dictionary<Card, System.Drawing.Point> cards = analyzer.LocateCards(table, settings);

    Logic logic = new Logic();
    HashSet<List<Card>> sets = logic.FindSets(new List<Card>(cards.Keys));

    Random rng = new Random();
    foreach (List<Card> set in sets)
    {
        DrawSet(table, cards, rng, set);
    }

    timer.Stop();
    this.Title = String.Format("Done. Elapsed time: {0}", timer.Elapsed.ToString());

    Shower.Source = TreeViz.ToBitmapSource(table);
    return table;
}
示例6: PictureTaken
/// <summary>
/// Callback fired when the camera preview captures a picture. Applies the
/// currently selected image filter (if any), saves the result to disk,
/// updates the thumbnail button, shows a confirmation toast and restarts
/// the camera preview. On failure, shows an alert with the innermost
/// exception message and returns without restarting the preview.
/// </summary>
public void PictureTaken(object sender, ProcessedCameraPreview.PictureTakenEventArgs ea)
{
    Android.Graphics.Bitmap bmp = ea.Bitmap;
    Camera camera = ea.Camera;
    try
    {
        Android.Graphics.Bitmap thumbnail = null;
        int maxThumbnailSize = 96;
        if (_imageFilter == null)
        {
            // No filter selected: save the raw bitmap directly.
            _lastSavedImageFile = ProcessedCameraPreview.SaveBitmap(this, bmp, PackageName, _topLayer);
            thumbnail = ProcessedCameraPreview.GetThumbnail(bmp, maxThumbnailSize);
            bmp.Dispose();
        }
        else
        {
            Image<Bgr, Byte> buffer1 = new Image<Bgr, byte>(bmp);
            bmp.Dispose(); // the Image<,> copy owns the pixels from here on
            using (ImageFilter filter = _imageFilter.Clone() as ImageFilter)
            {
                if (filter is DistorFilter)
                {
                    // Reduce the image size to half because the distortion
                    // filter uses a lot of memory.
                    Image<Bgr, Byte> tmp = buffer1.PyrDown();
                    buffer1.Dispose();
                    buffer1 = tmp;
                }
                if (filter.InplaceCapable)
                    filter.ProcessData(buffer1.Mat, buffer1.Mat);
                else
                {
                    // Filter needs a separate output buffer; swap afterwards.
                    Image<Bgr, Byte> buffer2 = new Image<Bgr, byte>(buffer1.Size);
                    filter.ProcessData(buffer1.Mat, buffer2.Mat);
                    buffer1.Dispose();
                    buffer1 = buffer2;
                }
            }
            using (Android.Graphics.Bitmap result = buffer1.ToBitmap())
            {
                buffer1.Dispose();
                _lastSavedImageFile = ProcessedCameraPreview.SaveBitmap(this, result, PackageName, _topLayer);
                thumbnail = ProcessedCameraPreview.GetThumbnail(result, maxThumbnailSize);
            }
        }
        _lastCapturedImageButton.SetImageBitmap(thumbnail);
    }
    catch (Exception excpt)
    {
        this.RunOnUiThread(() =>
        {
            // Surface the innermost exception — it carries the root cause.
            while (excpt.InnerException != null)
                excpt = excpt.InnerException;
            AlertDialog.Builder alert = new AlertDialog.Builder(this);
            alert.SetTitle("Error saving file");
            alert.SetMessage(excpt.Message);
            alert.SetPositiveButton("OK", (s, er) => { });
            alert.Show();
        });
        return;
    }
    // FIX: removed two large blocks of commented-out legacy code
    // (alternative FileNotFound/IO handlers and EXIF-tagging experiments).
    Toast.MakeText(this, "File Saved.", ToastLength.Short).Show();
    camera.StartPreview();
}
示例7: TestCudaPyr
/// <summary>
/// Verifies that the CUDA PyrDown/PyrUp results match the CPU Image&lt;,&gt;
/// implementations: the per-pixel absolute difference must not exceed 1.
/// Skipped when no CUDA device is present.
/// </summary>
public void TestCudaPyr()
{
    if (!CudaInvoke.HasCuda)
        return;

    // 640x480 gray image filled with uniform random noise.
    Image<Gray, Byte> source = new Image<Gray, byte>(640, 480);
    source.SetRandUniform(new MCvScalar(), new MCvScalar(255, 255, 255));

    // CPU reference results.
    Image<Gray, Byte> cpuDown = source.PyrDown();
    Image<Gray, Byte> cpuUp = cpuDown.PyrUp();

    // GPU results; the down image is half size in each dimension, the up
    // image matches the source size.
    CudaImage<Gray, Byte> gpuSource = new CudaImage<Gray, byte>(source);
    CudaImage<Gray, Byte> gpuDown = new CudaImage<Gray, byte>(source.Size.Width >> 1, source.Size.Height >> 1);
    CudaImage<Gray, Byte> gpuUp = new CudaImage<Gray, byte>(source.Size);
    CudaInvoke.PyrDown(gpuSource, gpuDown, null);
    CudaInvoke.PyrUp(gpuDown, gpuUp, null);

    // Overwrite the CPU images with |cpu - gpu| so MinMax yields the error.
    CvInvoke.AbsDiff(cpuDown, gpuDown.ToImage(), cpuDown);
    CvInvoke.AbsDiff(cpuUp, gpuUp.ToImage(), cpuUp);

    // Check the down-sampled diff first, then the up-sampled one,
    // matching the original order of operations.
    foreach (Image<Gray, Byte> diff in new[] { cpuDown, cpuUp })
    {
        double[] minVals, maxVals;
        Point[] minLocs, maxLocs;
        diff.MinMax(out minVals, out maxVals, out minLocs, out maxLocs);
        double worst = 0.0;
        foreach (double v in maxVals)
            if (v > worst)
                worst = v;
        Trace.WriteLine(String.Format("Max diff: {0}", worst));
        Assert.LessOrEqual(worst, 1.0);
    }
}
示例8: ProcessImage
/// <summary>
/// Full recognition pass over a grayscale frame: binarize, extract and
/// filter contours, build a Template from each contour and match the
/// templates against the known set in parallel. Side effects: mutates
/// grayFrame in place, stores it in this.binarizedFrame, and refills the
/// contours, samples and foundTemplates collections.
/// </summary>
public void ProcessImage(Image<Gray, byte> grayFrame)
{
    if (equalizeHist)
        grayFrame._EqualizeHist();//autocontrast
    // Smooth: one pyramid down/up pass removes fine noise at full size.
    Image<Gray, byte> smoothedGrayFrame = grayFrame.PyrDown();
    smoothedGrayFrame = smoothedGrayFrame.PyrUp();
    // Canny edge map, only when the noise filter is enabled.
    Image<Gray, byte> cannyFrame = null;
    if (noiseFilter)
        cannyFrame = smoothedGrayFrame.Canny(new Gray(cannyThreshold),
            new Gray(cannyThreshold));
    // Continue from the smoothed frame when blur is requested.
    if (blur)
        grayFrame = smoothedGrayFrame;
    // Binarize with a mean adaptive threshold; the block-size expression
    // forces an odd value.
    CvInvoke.cvAdaptiveThreshold(grayFrame, grayFrame, 255,
        Emgu.CV.CvEnum.ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_MEAN_C,
        Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY,
        adaptiveThresholdBlockSize + adaptiveThresholdBlockSize % 2 + 1,
        adaptiveThresholdParameter);
    // Invert so features become foreground (white).
    grayFrame._Not();
    // Optionally merge the edge map into the binarized image.
    if (addCanny)
        if (cannyFrame != null)
            grayFrame._Or(cannyFrame);
    // Keep the binarized frame for callers.
    this.binarizedFrame = grayFrame;
    //dilate canny contours for filtering
    if (cannyFrame != null)
        cannyFrame = cannyFrame.Dilate(3);
    //find contours
    var sourceContours = grayFrame.FindContours(
        Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE,
        Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST);
    //filter contours
    contours = FilterContours(sourceContours, cannyFrame,
        grayFrame.Width, grayFrame.Height);
    // Match each surviving contour against the template set in parallel.
    // foundTemplates, samples and templates are shared across threads,
    // hence the fine-grained locks below.
    lock (foundTemplates)
        foundTemplates.Clear();
    samples.Clear();
    // NOTE(review): samples.Clear() runs without a lock while the parallel
    // loop below locks `samples` when adding — presumably nothing else
    // touches samples between here and the loop; verify.
    lock (templates)
        Parallel.ForEach<Contour<Point>>(contours, (contour) =>
        {
            var arr = contour.ToArray();
            Template sample = new Template(arr, contour.Area, samples.templateSize);
            lock (samples)
                samples.Add(sample);
            if (!onlyFindContours)
            {
                // Match against the known templates; record any hit.
                FoundTemplateDesc desc = finder.FindTemplate(templates, sample);
                if (desc != null)
                    lock (foundTemplates)
                        foundTemplates.Add(desc);
            }
        }
        );
    // Drop overlapping detections.
    FilterByIntersection(ref foundTemplates);
}
示例9: ProcessImage
/// <summary>
/// Binarizes <paramref name="grayFrame"/>, extracts and filters contours,
/// then matches templates against them. Stores the binarized image in
/// binarizedFrame, the filtered contours in the contours field, and the
/// matches in foundTemplates. grayFrame is modified in place.
/// </summary>
/// <param name="grayFrame">Grayscale input frame; mutated by this call.</param>
/// <param name="enableMaxContour">Forwarded to FilterContours.</param>
public void ProcessImage(Image<Gray, byte> grayFrame, bool enableMaxContour = false)
{
    // Optional auto-contrast.
    if (equalizeHist)
        grayFrame._EqualizeHist();

    // Pyramid down/up pass: smooths noise while preserving dimensions.
    Image<Gray, byte> denoised = grayFrame.PyrDown().PyrUp();

    // Optional edge map built from the denoised frame.
    Image<Gray, byte> edges = null;
    if (noiseFilter)
        edges = denoised.Canny(new Gray(cannyThreshold), new Gray(cannyThreshold));

    // Optionally continue from the denoised frame instead of the original.
    if (blur)
        grayFrame = denoised;

    // Adaptive binarization; the block size is forced to be odd.
    int oddBlockSize = adaptiveThresholdBlockSize + adaptiveThresholdBlockSize % 2 + 1;
    CvInvoke.cvAdaptiveThreshold(grayFrame, grayFrame, 255, ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_MEAN_C, THRESH.CV_THRESH_BINARY,
        oddBlockSize, adaptiveThresholdParameter);

    // Invert so the features become foreground.
    grayFrame._Not();

    // Optionally merge the edge map into the binarized image.
    if (addCanny && edges != null)
        grayFrame._Or(edges);

    this.binarizedFrame = grayFrame;

    // Thicken the edges so they work as a contour-filter mask.
    if (edges != null)
        edges = edges.Dilate(3);

    // Extract, filter and match contours.
    var rawContours = grayFrame.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE, RETR_TYPE.CV_RETR_LIST);
    contours = FilterContours(rawContours, edges, grayFrame.Width, grayFrame.Height, enableMaxContour);
    foundTemplates = FindTemplates(contours);
}
示例10: getKeyboardInput
void getKeyboardInput()
{
char inKey;
int series = 92;
img = fileImage.Clone();
gray = img.Convert<Gray, Byte>();
gray = gray.PyrDown().PyrUp();
Image<Gray, Byte> splitImage = img.Split()[2];
//find optimal redThreshold
bool found1 = false;
while (!found1)
{
splitImage = img.Split()[2].ThresholdToZero(new Gray(redThreshold));
if (splitImage.GetSum().Intensity > 255 * 800)
{
redThreshold += 1;
}
else if (splitImage.GetSum().Intensity < 255 * 500)
{
redThreshold -= 1;
}
else
{
found1 = true;
}
System.Console.WriteLine("Setting red to " + redThreshold);
}
splitImage = splitImage.ThresholdBinary(new Gray(150), new Gray(255));
gray = splitImage;
//gray = gray.ThresholdBinary(new Gray(grayThreshold), new Gray(grayMax));
System.Console.WriteLine("Threshold: " + threshold + " Threshold Linking: " + thresholdLinking + " Gray Threshold: " + redThreshold + " Image: " + fileName);
do
{
if (!Console.KeyAvailable)
continue;
inKey = Console.ReadKey().KeyChar;
switch (inKey)
{
case '8':
threshold += 5;
break;
case '2':
threshold -= 5;
break;
case '4':
thresholdLinking -= 5;
break;
case '6':
thresholdLinking += 5;
break;
case '1':
redThreshold -= 5;
break;
case '3':
redThreshold += 5;
break;
case '9':
series++;
break;
case '7':
series--;
break;
}
//gray = new Image<Gray, Byte>("C:/RoboSub/RoboImagesTest2/" + series + "c.png");
// gray = gray.PyrDown().PyrUp();
//img = new Image<Bgr, Byte>("C:/RoboSub/RoboImagesTest2/" + series + "c.png");
img = fileImage.Clone();
gray = img.Convert<Gray, Byte>();
gray = gray.PyrDown().PyrUp();
splitImage = img.Split()[2];
//find optimal redThreshold
bool found = false;
while (!found)
{
splitImage = img.Split()[2];
splitImage = splitImage.ThresholdToZero(new Gray(redThreshold));
if (splitImage.GetSum().Intensity > 255 * 100)
{
redThreshold += 1;
}
else if (splitImage.GetSum().Intensity < 255 * 10)
{
redThreshold -= 1;
}
else
{
found = true;
}
System.Console.WriteLine("Setting red to " + redThreshold);
}
gray = splitImage;
//gray = gray.ThresholdBinary(new Gray(grayThreshold), new Gray(grayMax));
//.........这里部分代码省略.........