This article collects typical usage examples of the C# Image.Split method. If you are wondering what exactly C# Image.Split does, how to use it, or where to find examples of it, the curated code samples below may help. You can also browse further usage examples of the containing class, Image.
Fifteen code examples of the Image.Split method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code examples.
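Before the individual examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what Image.Split does in Emgu CV: it returns one single-channel Image&lt;Gray, TDepth&gt; per channel of the source image, in the color type's channel order (blue, green, red for Bgr). The image size and pixel values are arbitrary.
using System;
using Emgu.CV;
using Emgu.CV.Structure;
class SplitDemo
{
    static void Main()
    {
        // A 100x100 image filled with a constant BGR color (arbitrary values)
        using (Image<Bgr, byte> img = new Image<Bgr, byte>(100, 100, new Bgr(10, 20, 30)))
        {
            Image<Gray, byte>[] channels = img.Split(); // [0] = Blue, [1] = Green, [2] = Red
            Console.WriteLine(channels.Length);         // prints 3
            // Split allocates new images; dispose them when no longer needed
            foreach (Image<Gray, byte> ch in channels)
                ch.Dispose();
        }
    }
}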
Example 1: ExtractSkinColor
// Extract the skin-color region from an image
private void ExtractSkinColor(Image<Hsv, byte> src, ref Image<Gray, byte> dst)
{
// Split into channels and threshold each one
Image<Gray, byte>[] channels = src.Split();
channels[0] = channels[0].Convert<Byte>(delegate(Byte b) { if (HMAX >= b && b >= HMIN) return 255; else return 0; });
channels[1] = channels[1].Convert<Byte>(delegate(Byte b) { if (SMAX >= b && b >= SMIN) return 255; else return 0; });
channels[2] = channels[2].Convert<Byte>(delegate(Byte b) { if (VMAX >= b && b >= VMIN) return 255; else return 0; });
// The desired region is the intersection (AND) of the per-channel masks
dst = (channels[0] & channels[1] & channels[2]);
}
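As a side note, the same skin-color mask can usually be built without Split at all: Emgu CV's InRange thresholds all channels in one call. A minimal sketch, assuming the same HMIN/HMAX, SMIN/SMAX and VMIN/VMAX constants as above (this is an alternative, not the original author's code):
// Sketch: equivalent skin-color mask via InRange (assumes the HMIN..VMAX constants above)
private Image<Gray, byte> ExtractSkinColorInRange(Image<Hsv, byte> src)
{
    // InRange returns 255 where every channel lies inside [lower, upper], 0 elsewhere
    return src.InRange(new Hsv(HMIN, SMIN, VMIN), new Hsv(HMAX, SMAX, VMAX));
}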
Example 2: ConvertImageButton_Click
private void ConvertImageButton_Click(object sender, RoutedEventArgs e)
{
if (bmp != null)
{
img = new Image<Bgr, byte>(bmp);
// Split the image into its three channels (B, G, R)
Image<Gray, Byte>[] splitImg = img.Split();
// Take a single channel to bring the text closer to white (index 2 is the red channel in BGR order)
singleChannelImage = splitImg[2];
// Now we are going to run a binary threshold to try and remove the background
// The threshold is set high at the moment to remove as much guff as possible
try
{
int thresholdValue = int.Parse(ThresholdValue.Text);
Gray grayThreshold = new Gray(thresholdValue);
processedImage = singleChannelImage.ThresholdToZero(grayThreshold).Not();
// Load the bitmap into a BitmapSource for the image control to display it
BitmapSource bmpSource = Utils.BitmapToBitmapSource(processedImage.ToBitmap());
// Update the image display to show the processed image
DisplayImage.Source = bmpSource;
}
catch (Exception exception)
{
if (exception is FormatException || exception is ArgumentNullException)
{
MessageBox.Show("Error: Exception caught: " + exception.Message);
}
}
}
else
{
MessageBox.Show("Error: Image has not been loaded", "Error", MessageBoxButton.OK, MessageBoxImage.Error);
}
}
Example 3: dataStream
void dataStream(object sender, EventArgs e)
{
{
RangeF[] range = new RangeF[2];
range[0] = new RangeF(0, 180);
range[1] = new RangeF(0, 255);
pollColorImageStream();
pollDepthImageStream();
//Color------------------
Bitmap bitmapColor = new Bitmap(colorImage.Width, colorImage.Height, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
BitmapData bmd = bitmapColor.LockBits(new System.Drawing.Rectangle(0, 0, colorImage.Width, colorImage.Height), ImageLockMode.ReadWrite, bitmapColor.PixelFormat);
Marshal.Copy(colorPixelData, 0, bmd.Scan0, colorPixelData.Length);
bitmapColor.UnlockBits(bmd);
Image<Bgr, Byte> colorTemp = new Image<Bgr, Byte>(bitmapColor);
//Color------------------end
//depth------------------
byte[] byteDepth = new byte[640 * 480];
byte[] remap = new byte[640 * 480];
sensor.MapDepthFrameToColorFrame(DepthImageFormat.Resolution640x480Fps30, depthPixelData, ColorImageFormat.RgbResolution640x480Fps30, colorCoordinate);
for (int y = 0; y < 480; y++)
{
for (int x = 0; x < 640; x++)
{
int position = y * 640 + x;
short tempShort = depthPixelData[position];
//depthImage[y, x] = new Gray(tempShort);
byteDepth[position] = (byte)(tempShort >> 8);
//byteDepth[y, x] = new Gray((byte)(tempShort));
int positionRemap = colorCoordinate[position].Y * 640 + colorCoordinate[position].X;
if (positionRemap >= 640 * 480 || positionRemap < 0)
continue;
depthRemapData[positionRemap] = depthPixelData[position];
remap[positionRemap] = (byte)(tempShort >> 8);
//byteDepth[y, x] = new Gray((byte)(tempShort));
}
}
Bitmap bitmapDepth = new Bitmap(depthImage.Width, depthImage.Height, System.Drawing.Imaging.PixelFormat.Format8bppIndexed);
BitmapData bmd2 = bitmapDepth.LockBits(new System.Drawing.Rectangle(0, 0, depthImage.Width, depthImage.Height), ImageLockMode.ReadWrite, bitmapDepth.PixelFormat);
Marshal.Copy(byteDepth, 0, bmd2.Scan0, byteDepth.Length);
bitmapDepth.UnlockBits(bmd2);
Image<Gray, Byte> depthTemp = new Image<Gray, Byte>(bitmapDepth);
//depth------------------end
Byte[] backFrame = new Byte[640 * 480];
BitmapImage trackingOut = new BitmapImage();
if (trackingFlag != 0)
{
Image<Hsv, Byte> hsv = new Image<Hsv, Byte>(640, 480);
CvInvoke.cvCvtColor(colorTemp, hsv, COLOR_CONVERSION.CV_BGR2HSV);
Image<Gray, Byte> hue = hsv.Split()[0];
//range of hist is 180 or 256? not quite sure
DenseHistogram hist = new DenseHistogram(180, new RangeF(0.0f, 179.0f));
Image<Gray, Byte> mask = new Image<Gray, Byte>(trackWindow.Width, trackWindow.Height);
for (int y = 0; y < 480; y++)
{
for (int x = 0; x < 640; x++)
{
if (x >= trackWindow.X && x < trackWindow.X + trackWindow.Width && y >= trackWindow.Y && y < trackWindow.Y + trackWindow.Height)
mask[y - trackWindow.Y, x - trackWindow.X] = hue[y, x];
}
}
hist.Calculate(new IImage[] { mask }, false, null);
//maybe need to re-scale the hist to 0~255?
//back projection
IntPtr backProject = CvInvoke.cvCreateImage(hsv.Size, IPL_DEPTH.IPL_DEPTH_8U, 1);
CvInvoke.cvCalcBackProject(new IntPtr[1] { hue }, backProject, hist);
CvInvoke.cvErode(backProject, backProject, IntPtr.Zero, 3);
//CAMshift
CvInvoke.cvCamShift(backProject, trackWindow, new MCvTermCriteria(50, 0.1), out trackComp, out trackBox);
trackWindow = trackComp.rect;
if (trackWindow.Width < 5 || trackWindow.Height < 5)
{
if (trackWindow.Width < 5)
{
trackWindow.X = trackWindow.X + trackWindow.Width / 2 - 3;
trackWindow.Width = 6;
}
if (trackWindow.Height < 5)
{
trackWindow.Y = trackWindow.Y + trackWindow.Height / 2 - 3;
//......... part of this code omitted here .........
Example 4: inkFinder
public static void inkFinder()
{
Image<Hls, Byte> wrk = new Image<Hls, Byte>(img);
Image<Gray, Byte>[] channels = wrk.Split();
Image<Gray, Byte> imgHue = channels[0];
Image<Gray, Byte> imgLig = channels[1];
Image<Gray, Byte> imgSat = channels[2];
int bl = 0;
int wi = 0;
int col = 0;
ArrayList white = new ArrayList(); // HSL for white ones
//ArrayList black = new ArrayList(); // HSL for black ones
ArrayList other = new ArrayList();
//
// collect
for (int i = 0; i < wrk.Height; i++)
{
for (int j = 0; j < wrk.Width; j++)
{
int hue = (int)(imgHue[i, j].Intensity * 2);
float sat = (float)(imgSat[i, j].Intensity/255);
float lig = (float)(imgLig[i, j].Intensity/255);
//double border = -Math.Sqrt((1 - (lig - 0.5f) * (lig - 0.5f) / 0.34 / 0.34) * 0.84 * 0.84) + 1;
if (lig < 0.275 - 0.125 * sat || lig < 0.5 - 0.5 * sat / 0.6 || (sat < 0.18 && lig < 0.5))
{
//black.Add(new HSL(hue, sat, lig));
bl++;
continue;
}
if (lig > 0.725 + 0.125 * sat || lig > 0.5 + 0.5 * sat / 0.6 || (sat < 0.18 && lig >= 0.5))
{
white.Add(new HSL(hue, sat, lig));
wi++;
continue;
}
col++;
addToOther(other, 5, hue, sat, lig);
}
Console.WriteLine("Row - " + i + " : clusters - " + other.Count);
}
Console.WriteLine(bl + " " + wi + " " + col);
Console.WriteLine(other.Count);
ArrayList majCol = (ArrayList)other[0];
//
// find major color
for (int t = 0; t < other.Count; t++)
{
if (majCol.Count < ((ArrayList)other[t]).Count)
{
majCol = (ArrayList)other[t];
}
}
//
// find ranges
HSL firstCol = (HSL)majCol[0];
filterHue = new IntRange(firstCol.Hue, firstCol.Hue);
filterLig = new Range(firstCol.Luminance, firstCol.Luminance);
filterSat = new Range(firstCol.Saturation, firstCol.Saturation);
for (int q = 0; q < majCol.Count; q++)
{
HSL nextCol = (HSL)majCol[q];
if (nextCol.Hue < filterHue.Min) filterHue.Min = nextCol.Hue;
if (nextCol.Hue > filterHue.Max) filterHue.Max = nextCol.Hue;
if (nextCol.Luminance < filterLig.Min) filterLig.Min = nextCol.Luminance;
if (nextCol.Luminance > filterLig.Max) filterLig.Max = nextCol.Luminance;
if (nextCol.Saturation < filterSat.Min) filterSat.Min = nextCol.Saturation;
if (nextCol.Saturation > filterSat.Max) filterSat.Max = nextCol.Saturation;
}
//
// find fill color
int huesum = 0;
float satsum = 0;
float ligsum = 0;
for (int p = 0; p < white.Count; p++)
{
HSL wh = (HSL)white[p];
huesum += wh.Hue;
satsum += wh.Saturation;
ligsum += wh.Luminance;
}
fillColor = new HSL(huesum / white.Count, satsum / white.Count, ligsum / white.Count);
}
Example 5: GetCenterMass
private static Point GetCenterMass(Image<Bgr, Byte> img)
{
//ImageViewer viewer = new ImageViewer();
//crunch down thresholds to find red-based blobs
//viewer.Image = img;
//viewer.ShowDialog();
splitImage = img.Split();
blue = splitImage[0].Copy();
green = splitImage[1].Copy();
red = splitImage[2].Copy();
//make the lowest levels bright
//blue._ThresholdBinary(new Gray(blueMin), new Gray(255));
// green._ThresholdBinary(new Gray(greenMin), new Gray(255));
//make the highest levels bright
red = red.Sub(blue.ThresholdBinary(new Gray(blueMax), new Gray(255)));
red = red.Sub(green.ThresholdBinary(new Gray(greenMax), new Gray(255)));
red._ThresholdBinary(new Gray(redMin), new Gray(255));
//build up mask
// gray = red.And(blue).And(green);
gray = red.Copy();
//do auto threshold if active
if (AUTO_ADJUST_THRESHOLD)
{
//find optimal redThreshold
bool found = false;
int currentRed = (redMin + redMax) / 2; //start in the middle to hopefully minimize search time
while (!found)
{
if (gray.GetSum().Intensity / (img.Width * img.Height) > maxRedPercent)
{
currentRed += 1;
}
else if (gray.GetSum().Intensity / (img.Width * img.Height) < minRedPercent)
{
currentRed -= 1;
}
else
{
found = true;
break;
}
if (currentRed > redMax || currentRed < redMin)//out of bounds, so no red found!
{
found = true;
System.Console.WriteLine("Current Red Percent out of bound: " + currentRed);
return new Point(Int32.MinValue, Int32.MinValue);
}
System.Console.WriteLine("Setting red to " + redMin);
red = splitImage[2].Copy();
red = red.Sub(blue.ThresholdBinary(new Gray(blueMax), new Gray(255)));
red = red.Sub(green.ThresholdBinary(new Gray(greenMax), new Gray(255)));
red._ThresholdBinary(new Gray(redMin), new Gray(255));
//build up mask
// gray = red.And(blue).And(green);
gray = red.Copy();
// viewer.Image = gray;
}
}
MCvMoments moments = gray.GetMoments(true);
MCvPoint2D64f momentGravity = moments.GravityCenter;
Point center = new Point((int)momentGravity.x - img.Width / 2, (int)momentGravity.y - img.Height / 2);
System.Console.WriteLine("Center at " + center.ToString());
System.Console.WriteLine("Intesity: " + gray.GetSum().Intensity + ", " + (gray.GetSum().Intensity / (img.Height * img.Width)));
// viewer.Image = gray;
//viewer.ShowDialog();
return center;
}
Example 6: pobierzObraz
void pobierzObraz() //function with an infinite loop, runs in the th_pobierzObraz thread
{
for(;;)
{
//original image
obraz1 = kamerka.QueryFrame();
obraz1_mod = obraz1.Copy();
//V component and binary mask of the light in the camera image
Image<Hsv, Byte> obraz1_hsv = new Image<Hsv, byte>(obraz1.Bitmap);
CvInvoke.cvCvtColor(obraz1, obraz1_hsv, Emgu.CV.CvEnum.COLOR_CONVERSION.BGR2HSV);
obraz1_v = obraz1_hsv.Split()[2];
this.Invoke((MethodInvoker)delegate
{
bin_obraz1_bialy = obraz1_v.InRange(new Gray(265 - tbCzulosc.Value), new Gray(240 + tbCzulosc.Value));
CvInvoke.cvErode(bin_obraz1_bialy, bin_obraz1_bialy, rect_12, 5);
CvInvoke.cvDilate(bin_obraz1_bialy, bin_obraz1_bialy, rect_6, 5);
Status.pb2.Image = bin_obraz1_bialy.Bitmap;
});
//V component and binary mask of the light in the background
Image<Hsv, Byte> tlo_hsv = new Image<Hsv, byte>(tlo.Bitmap);
CvInvoke.cvCvtColor(tlo, tlo_hsv, Emgu.CV.CvEnum.COLOR_CONVERSION.BGR2HSV);
tlo_v = tlo_hsv.Split()[2];
bin_tlo_bialy = tlo_v.InRange(new Gray(250), new Gray(255));
CvInvoke.cvErode(bin_tlo_bialy, bin_tlo_bialy, rect_12, 5);
CvInvoke.cvDilate(bin_tlo_bialy, bin_tlo_bialy, rect_6, 5);
Status.pb4.Image = bin_tlo_bialy.Bitmap;
//difference of the two masks above
Image<Gray, Byte> bin_diff = new Image<Gray, byte>(tlo.Bitmap);
bin_diff = bin_obraz1_bialy - bin_tlo_bialy;
Status.pb5.Image = bin_diff.Bitmap;
//contours of the light region
MemStorage mem = new MemStorage();
Contour<Point> kontur_all = bin_diff.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, mem);
Contour<Point> kontur = null; //the contour that will be taken into account
//determine the largest contour
while (kontur_all != null)
{
double rozmiar = 0;
if (kontur != null) rozmiar = kontur.Area;
if (kontur_all.Area > rozmiar)
kontur = kontur_all;
kontur_all = kontur_all.HNext;
}
if (kontur != null && kontur.Area > 500)
{
kontur = kontur.ApproxPoly(kontur.Perimeter * 0.0025, mem);
obraz1_mod.Draw(kontur, new Bgr(Color.Red), 12);
st = true;
}
else
{
st = false;
}
//display the camera image with the marked contour
pb1.Image = obraz1_mod.Bitmap;
//display the background
Status.pb3.Image = tlo.Bitmap;
}
}
Example 7: MainLoop
private void MainLoop()
{
CurrentFrame = Cam.QueryFrame().Convert<Hsv, byte>();
Image<Gray, byte>[] channels;
Image<Gray, byte> HistImg1 = new Image<Gray, byte>(500, 500);
Image<Gray, byte> HistImg2 = new Image<Gray, byte>(500, 500);
Image<Gray, byte> ProbImage;
DenseHistogram hist1 = new DenseHistogram(new int[] { 10, 10 }, new RangeF[] { new RangeF(0, 255), new RangeF(0, 255) });
DenseHistogram hist2 = new DenseHistogram(new int[] { 10, 10 }, new RangeF[] { new RangeF(0, 255), new RangeF(0, 255) });
MCvConnectedComp comp;
MCvTermCriteria criteria = new MCvTermCriteria(10, 1);
MCvBox2D box;
while (true)
{
CurrentFrame = Cam.QueryFrame().Convert<Hsv, byte>();
if (OnSettingArea && TrackArea != Rectangle.Empty)
{
CurrentFrame.ROI = TrackArea;
channels = CurrentFrame.Split();
hist1.Calculate(new Image<Gray, byte>[] { channels[0], channels[1] }, false, null);
CurrentFrame.Not().CopyTo(CurrentFrame);
CurrentFrame.ROI = Rectangle.Empty;
CurrentFrame.Draw(TrackArea, new Hsv(100, 100, 100), 2);
imageBox1.Image = CurrentFrame;
}
else
{
if (TrackArea != Rectangle.Empty)
{
channels = CurrentFrame.Split();
ProbImage = hist1.BackProject<byte>(new Image<Gray, byte>[] { channels[0], channels[1] });
imageBox_Hist2.Image = ProbImage.Convert<Gray, byte>();
lock (LockObject)
{
if (TrackArea.Height * TrackArea.Width > 0)
{
CvInvoke.cvCamShift(ProbImage, TrackArea, criteria, out comp, out box);
TrackArea = comp.rect;
CurrentFrame.Draw(box, new Hsv(100, 100, 100), 2);
}
/**
ResetContourPoints();
for (int i = 0; i < 60; i++)
{
ProbImage.Snake(ContourPoints, (float)1.0, (float)0.5, (float)1.5, new Size(17, 17), criteria, true);
}
CurrentFrame.DrawPolyline(ContourPoints, true, new Hsv(100, 100, 100), 2);
*/
}
}
imageBox1.Image = CurrentFrame;
//calculate histogram;
//channels = CurrentFrame.Split();
//hist2.Calculate(new Image<Gray, byte>[] { channels[0], channels[1] }, false, null);
//hist2.Normalize(1);
//HistImg1.SetZero();
//DrawHist2D(HistImg1, hist1);
//imageBox_Hist1.Image = HistImg1;
}
}
}
Example 8: TestSplitMerge
public void TestSplitMerge()
{
Image<Bgr, Byte> img1 = new Image<Bgr, byte>(301, 234);
img1.SetRandUniform(new MCvScalar(), new MCvScalar(255, 255, 255));
Image<Gray, Byte>[] channels = img1.Split();
Image<Bgr, Byte> img2 = new Image<Bgr, byte>(channels);
EmguAssert.IsTrue(img1.Equals(img2));
}
Example 9: TestBgra
public void TestBgra()
{
Image<Bgra, Byte> img = new Image<Bgra, byte>(100, 100);
img.SetValue(new Bgra(255.0, 120.0, 0.0, 120.0));
Image<Gray, Byte>[] channels = img.Split();
}
Example 10: TestBgrSplit
public void TestBgrSplit()
{
using (Image<Bgr, Byte> img = new Image<Bgr, byte>(100, 100, new Bgr(0, 100, 200)))
{
Image<Gray, Byte>[] channels = img.Split();
EmguAssert.AreEqual(img.NumberOfChannels, channels.Length);
}
}
Example 11: RGBFilter
private Image<Gray, byte> RGBFilter(Image<Bgr, byte> input, //split into channels and filter by color
Gray Rmin, Gray Rmax,
Gray Gmin, Gray Gmax,
Gray Bmin, Gray Bmax)
{
Image<Gray, byte>[] channels = input.Split();
channels[0] = channels[0].InRange(Bmin, Bmax);
channels[1] = channels[1].InRange(Gmin, Gmax);
channels[2] = channels[2].InRange(Rmin, Rmax);
Image<Gray, byte> result = channels[0].And(channels[1]);
result = result.And(channels[2]);
return result;
}
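The per-channel filter above can likewise be collapsed into a single InRange call on the Bgr image, since Image&lt;Bgr, byte&gt;.InRange takes Bgr lower/upper bounds. A minimal sketch, assuming the bounds carry the same intensities as the Gray parameters above:
// Sketch: one-call equivalent of RGBFilter (bounds assumed to match the Gray values above)
private Image<Gray, byte> RGBFilterInRange(Image<Bgr, byte> input,
    double Rmin, double Rmax, double Gmin, double Gmax, double Bmin, double Bmax)
{
    // Bgr channel order is (blue, green, red); pixels inside all three ranges become 255
    return input.InRange(new Bgr(Bmin, Gmin, Rmin), new Bgr(Bmax, Gmax, Rmax));
}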
Example 12: button10_Click
private void button10_Click(object sender, EventArgs e)
{
Mixedflag = 0;
if (radioButton1.Checked)
{
size = 5;
}
else if (radioButton2.Checked)
{
size = 10;
}
else if (radioButton3.Checked)
{
size = 10;
Mixedflag = 1;
}
var = new Image<Bgr, byte>(path);
if (Mixedflag==0)
{
int hrsize = (var.Height) % (size);
int wrsize = (var.Width) % (size);
Bitmap re = new Bitmap(var.ToBitmap(), var.Width - wrsize, var.Height - hrsize);
var = new Image<Bgr,Byte>(re.Width,re.Height);
var = new Image<Bgr, Byte>(re);
grayvar = new Image<Gray, Byte>(re.Width, re.Height);
grayvar = new Image<Gray, Byte>(re);
imageBox1.Image = var;
imageBox1.Show();
//
progressBar2.Maximum = (grayvar.Height * grayvar.Width) / (size * size);
progressBar2.Visible = true;
progressBar2.Value = 0;
//progressBar1.Visible = true;
Mosaic = new Image<Bgr, Byte>(var.Size);
GMosaic = new Image<Gray, Byte>(grayvar.Size);
if (Isgray == 0)
{
Mosaic = new Image<Bgr, Byte>(var.Size);
var.CopyTo(Mosaic);
}
else
{
GMosaic = new Image<Gray, Byte>(grayvar.Size);
grayvar.CopyTo(GMosaic);
}
if (single == 1)
{
Image<Bgr, Byte> tochange = new Image<Bgr, Byte>(path);
Bitmap bit = new Bitmap(path, true); //Open file
Bitmap resized = new Bitmap(bit, size, size); //Resize file
imageBox1.Image = tochange;
tochange = new Image<Bgr,Byte>(resized);
Mosaic = new Image<Bgr, Byte>(var.Size);
var.CopyTo(Mosaic);
Image<Gray, Byte>[] channels = tochange.Split();
for (int i = 0; i < (grayvar.Height ); i += size)
for (int j = 0; j < grayvar.Width; j += size)
{
progressBar2.Value++;
float amin=1;
float max=1;
Rectangle roi = var.ROI;
var.ROI = new Rectangle(j, i, size, size);
//Set the region of interest to the rectangle above, so the image is treated as just that rectangle and nothing else
Bitmap smallbox = var.ToBitmap();
Image<Bgr, float> ismallbox = new Image<Bgr, float>(smallbox);
channels = tochange.Split();
for (int k = 0; k < channels.Length; k++)
{
max = 0;
amin = 300;
for (int a = 0; a < ismallbox.Height; a++)
{
for (int b = 0; b < ismallbox.Width; b++)
{
if ((int)ismallbox.Data[a, b, k] > max)
{
max = (int)ismallbox.Data[a, b, k];
}
if ((int)ismallbox.Data[a, b, k] < amin)
{
amin = (int)ismallbox.Data[a, b, k];
}
}
}
CvInvoke.cvNormalize(channels[k], channels[k], amin, max, Emgu.CV.CvEnum.NORM_TYPE.CV_MINMAX, IntPtr.Zero);
}
Image<Bgr, Byte> n = new Image<Bgr, byte>(channels);
resized = new Bitmap(n.Bitmap, size, size); //Resize file
//......... part of this code omitted here .........
Example 13: DetectTargets_work
/// <summary>
/// Worker thread, does the actual detection of targets in a backgroundworker thread.
/// </summary>
/// <param name="sender"></param>
/// <param name="e">contains parameters passed to the thread, should only be a Bitmap.</param>
private void DetectTargets_work(Object sender, DoWorkEventArgs e)
{
Bitmap img = (Bitmap)e.Argument;
/* using modified emguCV example code to try and find targets*/
Image<Hsv, Byte> circleImage = new Image<Hsv, byte>(new Bitmap(img)).PyrDown().PyrUp();
Image<Gray, Byte> gray = circleImage.Convert<Gray, byte>().PyrDown().PyrUp();
Gray cannyThreshold = new Gray(THRESHOLD_MAX);
Gray circleAccumulatorThreshold = new Gray(THRESHOLD_MIN);
CircleF[] circles = gray.HoughCircles(
cannyThreshold,
circleAccumulatorThreshold,
ACCUMULATOR_RESOLUTION, //Resolution of the accumulator used to detect centers of the circles
MIN_DISTANCE, //min distance between circles
MIN_RADIUS, //min radius of circles
MAX_RADIUS //max radius of circles
)[0]; //Get the circles from the first channel
lock (_lock)
{
_targets.Clear();
Image<Gray, Byte>[] channels = circleImage.Split();
try
{
//channels[0] is the mask for hue less than 20 or larger than 160
CvInvoke.cvInRangeS(channels[0], new MCvScalar(20), new MCvScalar(160), channels[0]);
channels[0]._Not();
//channels[1] is the mask for saturation of at least 10; this is mainly used to filter out white pixels
channels[1]._ThresholdBinary(new Gray(10), new Gray(255.0));
CvInvoke.cvAnd(channels[0], channels[1], channels[0], IntPtr.Zero);
}
finally
{
channels[1].Dispose();
channels[2].Dispose();
}
foreach (CircleF t in circles)
{
bool friend = false;
Gray pixelColor = (channels[0])[Convert.ToInt32(t.Radius / 2), Convert.ToInt32(t.Radius / 2)];
Gray test = new Gray(126);
if (test.Intensity < pixelColor.Intensity)
{
friend = true;
}
Tuple<Double, Double, Double, Double, Boolean> t2 = new Tuple<Double, Double, Double, Double, Boolean>(t.Center.X, 0, t.Center.Y, t.Radius, friend);
_targets.Add(t2);
}
}
if(ImageProcessed != null){
ImageProcessed(this, null);
}
}
Example 14: getKeyboardInput
void getKeyboardInput()
{
char inKey;
int series = 92;
img = fileImage.Clone();
gray = img.Convert<Gray, Byte>();
gray = gray.PyrDown().PyrUp();
Image<Gray, Byte> splitImage = img.Split()[2];
//find optimal redThreshold
bool found1 = false;
while (!found1)
{
splitImage = img.Split()[2].ThresholdToZero(new Gray(redThreshold));
if (splitImage.GetSum().Intensity > 255 * 800)
{
redThreshold += 1;
}
else if (splitImage.GetSum().Intensity < 255 * 500)
{
redThreshold -= 1;
}
else
{
found1 = true;
}
System.Console.WriteLine("Setting red to " + redThreshold);
}
splitImage = splitImage.ThresholdBinary(new Gray(150), new Gray(255));
gray = splitImage;
//gray = gray.ThresholdBinary(new Gray(grayThreshold), new Gray(grayMax));
System.Console.WriteLine("Threshold: " + threshold + " Threshold Linking: " + thresholdLinking + " Gray Threshold: " + redThreshold + " Image: " + fileName);
do
{
if (!Console.KeyAvailable)
continue;
inKey = Console.ReadKey().KeyChar;
switch (inKey)
{
case '8':
threshold += 5;
break;
case '2':
threshold -= 5;
break;
case '4':
thresholdLinking -= 5;
break;
case '6':
thresholdLinking += 5;
break;
case '1':
redThreshold -= 5;
break;
case '3':
redThreshold += 5;
break;
case '9':
series++;
break;
case '7':
series--;
break;
}
//gray = new Image<Gray, Byte>("C:/RoboSub/RoboImagesTest2/" + series + "c.png");
// gray = gray.PyrDown().PyrUp();
//img = new Image<Bgr, Byte>("C:/RoboSub/RoboImagesTest2/" + series + "c.png");
img = fileImage.Clone();
gray = img.Convert<Gray, Byte>();
gray = gray.PyrDown().PyrUp();
splitImage = img.Split()[2];
//find optimal redThreshold
bool found = false;
while (!found)
{
splitImage = img.Split()[2];
splitImage = splitImage.ThresholdToZero(new Gray(redThreshold));
if (splitImage.GetSum().Intensity > 255 * 100)
{
redThreshold += 1;
}
else if (splitImage.GetSum().Intensity < 255 * 10)
{
redThreshold -= 1;
}
else
{
found = true;
}
System.Console.WriteLine("Setting red to " + redThreshold);
}
gray = splitImage;
//gray = gray.ThresholdBinary(new Gray(grayThreshold), new Gray(grayMax));
//......... part of this code omitted here .........
Example 15: UpdateHue
private void UpdateHue(Image<Bgr, Byte> image)
{
// release previous image memory
if (hsv != null) hsv.Dispose();
hsv = image.Convert<Hsv, Byte>();
// Drop low saturation pixels
mask = hsv.Split()[1].ThresholdBinary(new Gray(60), new Gray(255));
CvInvoke.cvInRangeS(hsv, new MCvScalar(0, 30, Math.Min(10, 255), 0),
new MCvScalar(180, 256, Math.Max(10, 255), 0), mask);
// Get Hue
hue = hsv.Split()[0];
}