This article collects typical usage examples of the C# Mat.ToIplImage method. If you have been wondering what C# Mat.ToIplImage does, how to use it, or want to see it in real code, the curated examples below should help. You can also explore further usage examples of Mat, the class this method belongs to.
Shown below are 14 code examples of the Mat.ToIplImage method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code examples.
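Before the examples, a minimal, self-contained sketch of the pattern they all share may be useful: load an image into a Mat (the C++-style API), call Mat.ToIplImage to obtain an IplImage, and pass the result to the legacy C-style API. This is a sketch under assumptions, not code taken from the examples: it presumes the OpenCvSharp 2.x API that the samples below use (the OpenCvSharp and OpenCvSharp.CPlusPlus namespaces), and "lenna.png" is a placeholder file name.

using OpenCvSharp;             // legacy C-style API: Cv, IplImage, CvWindow, LoadMode
using OpenCvSharp.CPlusPlus;   // C++-style API: Mat

class ToIplImageSketch
{
    static void Main()
    {
        // Load an image into a Mat, convert it with ToIplImage,
        // and hand the result to the legacy highgui wrapper for display.
        using (Mat mat = new Mat("lenna.png", LoadMode.GrayScale))   // placeholder path
        using (IplImage ipl = mat.ToIplImage())
        using (new CvWindow("Mat.ToIplImage", ipl))
        {
            Cv.WaitKey();
        }
    }
}

Note that, as in the examples below, both the Mat and the converted IplImage are wrapped in using blocks, since both hold native memory.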
Example 1: Update
//---------------------------------------------------------
// Function name : Update
// Purpose       : Called once per frame
// Arguments     : none
// Return value  : none
//---------------------------------------------------------
void Update()
{
using (Mat m_img = new Mat())
{
// Grab a frame from the webcam
video.Read(m_img);
// Convert the Mat to an IplImage
using (var i_img = m_img.ToIplImage())
{
// OpenCV stores pixel data as BGR, so convert to HSV
Cv.CvtColor(i_img, i_img, ColorConversion.BgrToHsv);
// Smooth the image with a median filter
Cv.Smooth(i_img, i_img, SmoothType.Median, 5, 0, 0, 0);
// Sample data at selected points in the image
getPointData(i_img, pointResult);
// Generate an image from the point data (for debugging)
getPointImage(i_img, pointResult);
// Display the image on screen
using (var r_img = Cv2.CvArrToMat(i_img))
{
// Encode the image as JPEG
texture.LoadImage(r_img.ImEncode(ext));
texture.Apply();
}
}
}
}
Example 2: StereoCorrespondence
public StereoCorrespondence()
{
// cvFindStereoCorrespondenceBM + cvFindStereoCorrespondenceGC
// Stereo matching with both the block-matching and graph-cut algorithms
// Load the input images
using (IplImage imgLeft = new IplImage(Const.ImageTsukubaLeft, LoadMode.GrayScale))
using (IplImage imgRight = new IplImage(Const.ImageTsukubaRight, LoadMode.GrayScale))
{
// Allocate buffers for the disparity and output images
using (IplImage dispBM = new IplImage(imgLeft.Size, BitDepth.S16, 1))
using (IplImage dispLeft = new IplImage(imgLeft.Size, BitDepth.S16, 1))
using (IplImage dispRight = new IplImage(imgLeft.Size, BitDepth.S16, 1))
using (IplImage dstBM = new IplImage(imgLeft.Size, BitDepth.U8, 1))
using (IplImage dstGC = new IplImage(imgLeft.Size, BitDepth.U8, 1))
using (IplImage dstAux = new IplImage(imgLeft.Size, BitDepth.U8, 1))
using (Mat dstSGBM = new Mat())
{
// Distance measurement and scaling
int sad = 3;
using (CvStereoBMState stateBM = new CvStereoBMState(StereoBMPreset.Basic, 16))
using (CvStereoGCState stateGC = new CvStereoGCState(16, 2))
using (StereoSGBM sgbm = new StereoSGBM()
{
MinDisparity = 0,
NumberOfDisparities = 32,
PreFilterCap = 63,
SADWindowSize = sad,
P1 = 8 * imgLeft.NChannels * sad * sad,
P2 = 32 * imgLeft.NChannels * sad * sad,
UniquenessRatio = 10,
SpeckleWindowSize = 100,
SpeckleRange = 32,
Disp12MaxDiff = 1,
FullDP = false,
})
{
Cv.FindStereoCorrespondenceBM(imgLeft, imgRight, dispBM, stateBM); // stateBM.FindStereoCorrespondence(imgLeft, imgRight, dispBM);
Cv.FindStereoCorrespondenceGC(imgLeft, imgRight, dispLeft, dispRight, stateGC, false); // stateGC.FindStereoCorrespondence(imgLeft, imgRight, dispLeft, dispRight, false);
Cv.FindStereoCorrespondence(imgLeft, imgRight, DisparityMode.Birchfield, dstAux, 50, 25, 5, 12, 15, 25);
sgbm.FindCorrespondence(new Mat(imgLeft), new Mat(imgRight), dstSGBM);
Cv.ConvertScale(dispBM, dstBM, 1);
Cv.ConvertScale(dispLeft, dstGC, -16);
Cv.ConvertScale(dstAux, dstAux, 16);
dstSGBM.ConvertTo(dstSGBM, dstSGBM.Type, 32, 0);
using (new CvWindow("Stereo Correspondence (BM)", dstBM))
using (new CvWindow("Stereo Correspondence (GC)", dstGC))
using (new CvWindow("Stereo Correspondence (cvaux)", dstAux))
using (new CvWindow("Stereo Correspondence (SGBM)", dstSGBM.ToIplImage()))
{
Cv.WaitKey();
}
}
}
}
}
Example 3: Run
public void Run()
{
// Load the left and right images
using (var imgLeft = new IplImage(FilePath.Image.TsukubaLeft, LoadMode.GrayScale))
using (var imgRight = new IplImage(FilePath.Image.TsukubaRight, LoadMode.GrayScale))
{
// output image buffers
using (var dispBM = new IplImage(imgLeft.Size, BitDepth.S16, 1))
using (var dispLeft = new IplImage(imgLeft.Size, BitDepth.S16, 1))
using (var dispRight = new IplImage(imgLeft.Size, BitDepth.S16, 1))
using (var dstBM = new IplImage(imgLeft.Size, BitDepth.U8, 1))
using (var dstGC = new IplImage(imgLeft.Size, BitDepth.U8, 1))
using (var dstAux = new IplImage(imgLeft.Size, BitDepth.U8, 1))
using (var dstSGBM = new Mat())
{
// distance measurement and scaling
const int sad = 3;
using (var stateBM = new CvStereoBMState(StereoBMPreset.Basic, 16))
using (var stateGC = new CvStereoGCState(16, 2))
using (var sgbm = new StereoSGBM() // C++
{
MinDisparity = 0,
NumberOfDisparities = 32,
PreFilterCap = 63,
SADWindowSize = sad,
P1 = 8 * imgLeft.NChannels * sad * sad,
P2 = 32 * imgLeft.NChannels * sad * sad,
UniquenessRatio = 10,
SpeckleWindowSize = 100,
SpeckleRange = 32,
Disp12MaxDiff = 1,
FullDP = false,
})
{
Cv.FindStereoCorrespondenceBM(imgLeft, imgRight, dispBM, stateBM);
Cv.FindStereoCorrespondenceGC(imgLeft, imgRight, dispLeft, dispRight, stateGC, false);
Cv.FindStereoCorrespondence(imgLeft, imgRight, DisparityMode.Birchfield, dstAux, 50, 25, 5, 12, 15, 25); // cvaux
sgbm.Compute(new Mat(imgLeft), new Mat(imgRight), dstSGBM);
Cv.ConvertScale(dispBM, dstBM, 1);
Cv.ConvertScale(dispLeft, dstGC, -16);
Cv.ConvertScale(dstAux, dstAux, 16);
dstSGBM.ConvertTo(dstSGBM, dstSGBM.Type(), 32, 0);
using (new CvWindow("Stereo Correspondence (BM)", dstBM))
using (new CvWindow("Stereo Correspondence (GC)", dstGC))
using (new CvWindow("Stereo Correspondence (cvaux)", dstAux))
using (new CvWindow("Stereo Correspondence (SGBM)", dstSGBM.ToIplImage()))
{
Cv.WaitKey();
}
}
}
}
}
Example 4: PixelAccess
private void PixelAccess()
{
using (Mat mat = new Mat(128, 128, MatrixType.U8C1))
{
for (int y = 0; y < mat.Rows; y++)
{
for (int x = 0; x < mat.Cols; x++)
{
mat.Set<byte>(y, x, (byte)(y + x));
}
}
using (new CvWindow("PixelAccess", mat.ToIplImage()))
{
Cv.WaitKey();
}
}
}
Example 5: SampleCpp
/// <summary>
/// sample of new C++ style wrapper
/// </summary>
private void SampleCpp()
{
// (1) Load the images
using (Mat imgGray = new Mat(Const.ImageGoryokaku, LoadMode.GrayScale))
using (Mat imgStd = new Mat(Const.ImageGoryokaku, LoadMode.Color))
using (Mat imgProb = imgStd.Clone())
{
// Preprocessing for the Hough transform
CvCpp.Canny(imgGray, imgGray, 50, 200, ApertureSize.Size3, false);
// (3) Detect lines with the standard Hough transform and draw them
CvLineSegmentPolar[] segStd = CvCpp.HoughLines(imgGray, 1, Math.PI / 180, 50, 0, 0);
int limit = Math.Min(segStd.Length, 10);
for (int i = 0; i < limit; i++ )
{
float rho = segStd[i].Rho;
float theta = segStd[i].Theta;
double a = Math.Cos(theta);
double b = Math.Sin(theta);
double x0 = a * rho;
double y0 = b * rho;
CvPoint pt1 = new CvPoint { X = Cv.Round(x0 + 1000 * (-b)), Y = Cv.Round(y0 + 1000 * (a)) };
CvPoint pt2 = new CvPoint { X = Cv.Round(x0 - 1000 * (-b)), Y = Cv.Round(y0 - 1000 * (a)) };
imgStd.Line(pt1, pt2, CvColor.Red, 3, LineType.AntiAlias, 0);
}
// (4) Detect line segments with the probabilistic Hough transform and draw them
CvLineSegmentPoint[] segProb = CvCpp.HoughLinesP(imgGray, 1, Math.PI / 180, 50, 50, 10);
foreach (CvLineSegmentPoint s in segProb)
{
imgProb.Line(s.P1, s.P2, CvColor.Red, 3, LineType.AntiAlias, 0);
}
// (5) Create windows and show the detection results
using (new CvWindow("Hough_line_standard", WindowMode.AutoSize, imgStd.ToIplImage()))
using (new CvWindow("Hough_line_probabilistic", WindowMode.AutoSize, imgProb.ToIplImage()))
{
CvWindow.WaitKey(0);
}
}
}
Example 6: SampleCpp
/// <summary>
/// sample of new C++ style wrapper
/// </summary>
private void SampleCpp()
{
// (1) Load the image
using (Mat imgGray = new Mat(FilePath.Goryokaku, LoadMode.GrayScale))
using (Mat imgStd = new Mat(FilePath.Goryokaku, LoadMode.Color))
using (Mat imgProb = imgStd.Clone())
{
// Preprocess
Cv2.Canny(imgGray, imgGray, 50, 200, 3, false);
// (3) Run Standard Hough Transform
CvLineSegmentPolar[] segStd = Cv2.HoughLines(imgGray, 1, Math.PI / 180, 50, 0, 0);
int limit = Math.Min(segStd.Length, 10);
for (int i = 0; i < limit; i++ )
{
// Draws result lines
float rho = segStd[i].Rho;
float theta = segStd[i].Theta;
double a = Math.Cos(theta);
double b = Math.Sin(theta);
double x0 = a * rho;
double y0 = b * rho;
CvPoint pt1 = new CvPoint { X = Cv.Round(x0 + 1000 * (-b)), Y = Cv.Round(y0 + 1000 * (a)) };
CvPoint pt2 = new CvPoint { X = Cv.Round(x0 - 1000 * (-b)), Y = Cv.Round(y0 - 1000 * (a)) };
imgStd.Line(pt1, pt2, CvColor.Red, 3, LineType.AntiAlias, 0);
}
// (4) Run Probabilistic Hough Transform
CvLineSegmentPoint[] segProb = Cv2.HoughLinesP(imgGray, 1, Math.PI / 180, 50, 50, 10);
foreach (CvLineSegmentPoint s in segProb)
{
imgProb.Line(s.P1, s.P2, CvColor.Red, 3, LineType.AntiAlias, 0);
}
// (5) Show results
using (new CvWindow("Hough_line_standard", WindowMode.AutoSize, imgStd.ToIplImage()))
using (new CvWindow("Hough_line_probabilistic", WindowMode.AutoSize, imgProb.ToIplImage()))
{
CvWindow.WaitKey(0);
}
}
}
Example 7: BgSubtractorMOG
public BgSubtractorMOG()
{
using (CvCapture capture = new CvCapture(Const.MovieHara)) // specify your movie file
using (BackgroundSubtractorMOG mog = new BackgroundSubtractorMOG())
using (CvWindow windowSrc = new CvWindow("src"))
using (CvWindow windowDst = new CvWindow("dst"))
{
IplImage imgFrame;
using (Mat imgFg = new Mat())
while ((imgFrame = capture.QueryFrame()) != null)
{
mog.Run(new Mat(imgFrame, false), imgFg, 0.01);
windowSrc.Image = imgFrame;
windowDst.Image = imgFg.ToIplImage();
Cv.WaitKey(50);
}
}
}
Example 8: DoTracking
// FaceTracking
void DoTracking()
{
//while (running)
//{
try
{
if (kinect.GetDepthRaw())
{
//lock (this)
//{
src = DoDepthBuffer(kinect.usersDepthMap, KinectWrapper.GetDepthWidth(), KinectWrapper.GetDepthHeight());
roi = src.Clone(new OpenCvSharp.CPlusPlus.Rect(roiX, roiY, roiW, roiH));
roi.ConvertTo(roi, OpenCvSharp.CPlusPlus.MatType.CV_8U, 255.0 / 32000.0);
Cv2.Subtract(new Mat(roiH, roiW, MatType.CV_8UC1, new Scalar(255)), roi, roi);
double threshMax = 255.0 - ((255.0 / 32000.0) * ((ushort)srcThreshMax << 3));
double threshMin = 255.0 - ((255.0 / 32000.0) * ((ushort)srcThreshMin << 3));
roi = roi.Threshold(threshMin, 255.0, ThresholdType.ToZeroInv);
roi = roi.Threshold(threshMax, 255.0, ThresholdType.ToZero);
// Flip the up/down and left/right axes as configured
if (!FlipUpDownAxis && FlipLeftRightAxis)
roi.Flip(FlipMode.XY);
else if (!FlipUpDownAxis)
roi.Flip(FlipMode.X);
else if (FlipLeftRightAxis)
roi.Flip(FlipMode.Y);
//Apply elliptical mask
Mat ellipseMask = new Mat(roi.Rows, roi.Cols, OpenCvSharp.CPlusPlus.MatType.CV_8U, new Scalar(0.0));
Cv2.Ellipse(ellipseMask, new Point(ellipseMaskCenterX, ellipseMaskCenterY), new Size(axisMaskX, axisMaskY), maskAngle, maskStartAngle, maskEndAngle, new Scalar(255.0), -1);
Cv2.BitwiseAnd(roi, ellipseMask, roi);
//Remove noise by morphologyEx
Mat kernel = Cv2.GetStructuringElement(StructuringElementShape.Ellipse, new Size(3, 3));
Cv2.MorphologyEx(roi, roi, MorphologyOperation.Open, kernel);
Cv2.MorphologyEx(roi, roi, MorphologyOperation.Close, kernel);
//Subtract background
if (first)
{
bg = roi.Clone();
//bg = bg.Blur(new Size(smoothBlur, smoothBlur));
first = false;
}
fg = bg.Clone();
//roi = roi.Blur(new Size(smoothBlur, smoothBlur));
Mat subMask = roi.Clone();
subMask = subMask.Threshold(smThresh, 255.0, ThresholdType.ToZero);
//Cv2.ImShow("sm",subMask);
bg.CopyTo(roi, subMask);
OpenCvSharp.Cv.AbsDiff(roi.ToCvMat(), bg.ToCvMat(), fg.ToCvMat());
//Threshold foreground image
fgthresh = fg.Threshold(threshold, 255.0, ThresholdType.Binary);
fgthresh = fgthresh.Blur(new Size(smoothBlur, smoothBlur));
//Detect Blobs
Mat roiToImg = new Mat(roi.Cols, roi.Rows, MatType.CV_8UC3);
Mat threshToImg = fgthresh.Clone();
Cv2.Merge(new Mat[] { roi, roi, roi }, roiToImg);
IplImage showImg = roiToImg.ToIplImage();
IplImage fgthreshImg = threshToImg.ToIplImage();
OpenCvSharp.Blob.CvBlobLib.Label(fgthreshImg, blobs);
OpenCvSharp.Blob.CvBlobLib.FilterByArea(blobs, blobMinArea, blobMaxArea);
OpenCvSharp.Blob.CvBlobLib.RenderBlobs(blobs, fgthreshImg, showImg, RenderBlobsMode.Color | RenderBlobsMode.Centroid);
UpdateTracks(blobs, tracks, blobMinDistance, blobMaxLife);
//OpenCvSharp.Blob.CvBlobLib.RenderTracks(tracks, fgthreshImg, showImg, RenderTracksMode.BoundingBox | RenderTracksMode.Id);
RenderTracks(showImg);
//Cv.ShowImage("thres", fgthreshImg);
Cv.ShowImage("showBlob", showImg);
//Check Blob Actions
//Debug.Log(tracks.Count);
//}
}
}
catch (System.Exception e)
{
//throw e;
Debug.Log(e.Message + " " + e.StackTrace);
}
//}
}
Example 9: getHandPicturesInRange
public IplImage getHandPicturesInRange(IplImage source)
{
//Convert to black and white
IplImage image = source.Clone();
IplImage gray = Cv.CreateImage(new CvSize(image.Width, image.Height), BitDepth.U8, 1);
Cv.CvtColor(image, image, ColorConversion.RgbToHsv);
Cv.Smooth(image, image, SmoothType.Median, 7);
Scalar s1 = new Scalar(130, 50, 50);
Scalar s2 = new Scalar(360, 255, 255);
Cv.InRangeS(image, s1, s2, gray);
Cv.Threshold(gray, gray, 0, 255, ThresholdType.Binary);
//Cv.Canny(gray, gray, 100, 200, ApertureSize.Size3);
//Cv.Laplace(gray, gray, ApertureSize.Size1);
Mat sourceMat = new Mat(source);
Mat grayMat = new Mat(gray);
Mat outputMat = new Mat();
Cv2.BitwiseAnd(sourceMat, sourceMat, outputMat, grayMat);
IplImage returnIPL = outputMat.ToIplImage();
image.Dispose();
gray.Dispose();
sourceMat.Dispose();
grayMat.Dispose();
outputMat.Dispose();
return returnIPL;
}
Example 10: getHandPicturesGMG
public IplImage getHandPicturesGMG(IplImage source)
{
Mat frame = new Mat(), foregroundMask = new Mat(), output = new Mat();
frame = new Mat(source);
backgroundSubtractor.Run(frame, foregroundMask);
Cv2.BitwiseAnd(frame, frame, output, foregroundMask);
IplImage returnIPL = output.ToIplImage();
frame.Dispose();
foregroundMask.Dispose();
output.Dispose();
return returnIPL;
}
Example 11: convertMatToIplImage
//---------------------------------------------------------
// Function name : convertMatToIplImage
// Purpose       : Convert a Mat to an IplImage
// Arguments     : m_img / camera image as a Mat
// Return value  : i_img / camera image as an IplImage
//---------------------------------------------------------
private IplImage convertMatToIplImage(Mat m_img)
{
i_img = m_img.ToIplImage();
return i_img;
}
Example 12: fnc
private IplImage fnc(Mat mat, CvScalar scalar)
{
// Note: the incoming mat argument is ignored; it is replaced by a new 50x50
// 3-channel Mat filled with the given scalar before conversion
mat = new Mat(new CvSize(50, 50), MatrixType.U8C3, scalar);
IplImage ipl = mat.ToIplImage();
return ipl;
}
Example 13: GetCenterPointofLED
//Extract the blob contour and get its center coordinates
Point GetCenterPointofLED(Mat grayImage)
{
OpenCvSharp.CPlusPlus.Point centerPoint = new OpenCvSharp.CPlusPlus.Point();
IplImage grayIpl = grayImage.ToIplImage().Clone();
IplImage calibIpl = new IplImage(grayIpl.Size, BitDepth.U8, 3);
//Detect the center
CvBlobs blobs = new CvBlobs();
blobs.Label(grayIpl);
//blobs.FilterByArea(20, 1500);
CvBlob blob = blobs.LargestBlob();
try
{
if (blob != null)
{
centerPoint = new Point(blob.Centroid.X, blob.Centroid.Y);
blobs.RenderBlobs(grayIpl, calibIpl);
}
}catch{
Console.WriteLine("eroor:counter");
}
this.CalibrationImage = new Mat(calibIpl);
Console.WriteLine(centerPoint);
return centerPoint;
}
Example 14: MakeImagesForArticle
private static void MakeImagesForArticle()
{
var resizeK = 0.2;
var dir = "Example/";
var src = new Mat("0.bmp");
var src_g = new Mat("0.bmp", LoadMode.GrayScale);
var src_1 = new Mat("1.bmp");
var src_1_g = new Mat("1.bmp", LoadMode.GrayScale);
var background = new Mat("background.bmp");
var background_g = new Mat("background.bmp", LoadMode.GrayScale);
src.Resize(resizeK).ImWrite(dir + "0.png");
src_g.Resize(resizeK).ImWrite(dir + "0 g.png");
src_g.ThresholdStairs().Resize(resizeK).ImWrite(dir + "0 g th.png");
var canny = new Mat();
Cv2.Canny(src_g, canny, 50, 200);
canny.Resize(0.5).ImWrite(dir + "0 canny.png");
Mat[] src_channels;
Cv2.Split(src, out src_channels);
for (var i = 0; i < src_channels.Length; ++i)
{
var channels = Enumerable.Range(0, src_channels.Length).Select(j => new Mat(src_channels[0].Rows, src_channels[0].Cols, src_channels[0].Type())).ToArray();
channels[i] = src_channels[i];
var dst = new Mat();
Cv2.Merge(channels, dst);
dst.Resize(resizeK).ImWrite(dir + string.Format("0 ch{0}.png", i));
src_channels[i].ThresholdStairs().Resize(resizeK).ImWrite(dir + string.Format("0 ch{0} th.png", i));
}
if (true)
{
src.Resize(0.4).ImWrite(dir + "0.png");
src_1.Resize(0.4).ImWrite(dir + "1.png");
background.Resize(0.4).ImWrite(dir + "bg.png");
var dst_01 = new Mat();
Cv2.Absdiff(src, src_1, dst_01);
dst_01.Resize(resizeK).ImWrite(dir + "01.png");
dst_01.Cut(new Rect(50, src.Height * 4 / 5 - 50, src.Width / 5, src.Height / 5)).ImWrite(dir + "01 part.png");
dst_01.Cut(new Rect(50, src.Height * 4 / 5 - 50, src.Width / 5, src.Height / 5)).CvtColor(ColorConversion.RgbToGray).ImWrite(dir + "01 g.png");
dst_01.CvtColor(ColorConversion.RgbToGray).ThresholdStairs().Resize(resizeK).ImWrite(dir + "01 g th.png");
var dst_01_g = new Mat();
Cv2.Absdiff(src_g, src_1_g, dst_01_g);
dst_01_g.Cut(new Rect(50, src.Height * 4 / 5 - 50, src.Width / 5, src.Height / 5)).ImWrite(dir + "0g1g.png");
dst_01_g.ThresholdStairs().Resize(resizeK).ImWrite(dir + "0g1g th.png");
}
if (true)
{
var dst_0b = new Mat();
Cv2.Absdiff(src, background, dst_0b);
dst_0b.Resize(0.6).ImWrite(dir + "0b.png");
var dst_0b_g = new Mat();
Cv2.Absdiff(src_g, background_g, dst_0b_g);
dst_0b_g.Resize(0.3).ImWrite(dir + "0b g.png");
dst_0b_g.ThresholdStairs().Resize(0.3).ImWrite(dir + "0b g th.png");
}
if (true)
{
var hsv_src = new Mat();
Cv2.CvtColor(src, hsv_src, ColorConversion.RgbToHsv);
var hsv_background = new Mat();
Cv2.CvtColor(background, hsv_background, ColorConversion.RgbToHsv);
var hsv_background_channels = hsv_background.Split();
var hsv_src_channels = hsv_src.Split();
if (true)
{
var all = new Mat(src.ToIplImage(), true);
for (var i = 0; i < hsv_src_channels.Length; ++i)
{
hsv_src_channels[i].CvtColor(ColorConversion.GrayToRgb).CopyTo(all, new Rect(i * src.Width / 3, src.Height / 2, src.Width / 3, src.Height / 2));
}
src_g.CvtColor(ColorConversion.GrayToRgb).CopyTo(all, new Rect(src.Width / 2, 0, src.Width / 2, src.Height / 2));
all.Resize(0.3).ImWrite(dir + "all.png");
}
foreach (var pair in new[] { "h", "s", "v" }.Select((channel, index) => new { channel, index }))
{
var diff = new Mat();
Cv2.Absdiff(hsv_src_channels[pair.index], hsv_background_channels[pair.index], diff);
diff.Resize(0.3).With_Title(pair.channel).ImWrite(dir + string.Format("0b {0}.png", pair.channel));
diff.ThresholdStairs().Resize(0.3).ImWrite(dir + string.Format("0b {0} th.png", pair.channel));
hsv_src_channels[pair.index].Resize(resizeK).With_Title(pair.channel).ImWrite(dir + string.Format("0 {0}.png", pair.channel));
foreach (var d in new[] { -100, -50, 50, 100 })
//......... remainder of the code omitted .........