This article collects typical usage examples of the C# Mat.Channels method. If you are wondering what Mat.Channels does, how to call it, or where it fits, the selected code samples below may help. You can also explore the Mat class that this method belongs to.
The following shows 7 code examples of Mat.Channels, sorted by popularity by default.
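Before the examples, here is a minimal sketch of what Mat.Channels() reports. It is not taken from the examples below; it assumes OpenCvSharp (same API style as the samples) and uses a placeholder file name.
using System;
using OpenCvSharp;

class ChannelsDemo
{
    static void Main()
    {
        // Load a color image (3-channel BGR) and derive a 1-channel grayscale copy.
        using (var color = new Mat("sample.png"))   // placeholder path
        using (var gray = new Mat())
        {
            Cv2.CvtColor(color, gray, ColorConversion.BgrToGray);
            Console.WriteLine(color.Channels());    // prints 3 for a BGR image
            Console.WriteLine(gray.Channels());     // prints 1 after grayscale conversion
        }
    }
}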
Example 1: CaliblationUpdate
// Calibration
public void CaliblationUpdate(ShadowPackage proccesedMatPckage)
{
// Input image (cloned so the incoming package's Mat is left untouched)
Mat srcImg = proccesedMatPckage.srcMat.Clone();
this.imgChannles = srcImg.Channels();
#region
if (this.fragCutImg == 1)
{
this.cutRect = new CutRect(srcImg);
this.fragCutImg = 0;
this.imageWidth = srcImg.Width;
this.imageHeight = srcImg.Height;
}
#endregion
// Get the calibration coordinates
this.backIn_Pt = this.changePt(this._getPtForCalib(0));
this.backOut_Pt = this.changePt(this._getPtForCalib(1));
this.floorIn_Pt = this.changePt(this._getPtForCalib(2));
this.floorOut_Pt = this.changePt(this._getPtForCalib(3));
this.backOut_Pt = this.changePtRange(this.backOut_Pt);
this.floorOut_Pt = this.changePtRange(this.floorOut_Pt);
//back
#region
this.backIn_Pt = this.changePtRange(this.backIn_Pt);
this.backDstImg = this.PerspectiveProject(srcImg, this.backIn_Pt, this.backOut_Pt).Clone();
this.backDstImg = cutRect.CutImage(this.backDstImg,this.backOut_Pt).Clone();
#endregion
//floor
#region
this.floorIn_Pt = this.changePtRange(this.floorIn_Pt);
this.floorDstImg = this.PerspectiveProject(srcImg, this.floorIn_Pt, this.floorOut_Pt).Clone();
this.floorDstImg = cutRect.CutImage(this.floorDstImg, this.floorOut_Pt).Clone();
#endregion
srcImg.Dispose();
}
Example 2: ToWriteableBitmap
/// <summary>
/// Converts Mat to WriteableBitmap.
/// The result is written into the WriteableBitmap passed as an argument instead of
/// allocating a new instance, which makes this overload memory-efficient.
/// </summary>
/// <param name="src">Input Mat</param>
/// <param name="dst">Output WriteableBitmap</param>
public static void ToWriteableBitmap(Mat src, WriteableBitmap dst)
{
if (src == null)
throw new ArgumentNullException("src");
if (dst == null)
throw new ArgumentNullException("dst");
if (src.Width != dst.PixelWidth || src.Height != dst.PixelHeight)
throw new ArgumentException("size of src must be equal to size of dst");
//if (src.Depth != BitDepth.U8)
//throw new ArgumentException("bit depth of src must be BitDepth.U8", "src");
if (src.Dims() > 2)
throw new ArgumentException("Mat dimensions must be 2");
int w = src.Width;
int h = src.Height;
int bpp = dst.Format.BitsPerPixel;
int channels = GetOptimumChannels(dst.Format);
if (src.Channels() != channels)
{
throw new ArgumentException("channels of dst != channels of PixelFormat", "dst");
}
bool submat = src.IsSubmatrix();
bool continuous = src.IsContinuous();
unsafe
{
byte* pSrc = (byte*)(src.Data);
int sstep = (int)src.Step();
if (bpp == 1)
{
if (submat)
throw new NotImplementedException("submatrix not supported");
// Copy the pixels manually (1bpp data is bit-packed)
int stride = w / 8 + 1;
if (stride < 2)
stride = 2;
byte[] pixels = new byte[h * stride];
for (int x = 0, y = 0; y < h; y++)
{
int offset = y * stride;
for (int bytePos = 0; bytePos < stride; bytePos++)
{
if (x < w)
{
byte b = 0;
// Check whether each of the next 8 horizontal pixels is set and pack them into a single byte
for (int i = 0; i < 8; i++)
{
b <<= 1;
if (x < w && pSrc[sstep * y + x] != 0)
{
b |= 1;
}
x++;
}
pixels[offset + bytePos] = b;
}
}
x = 0;
}
dst.WritePixels(new Int32Rect(0, 0, w, h), pixels, stride, 0);
return;
}
// Copy everything in one go
if (!submat && continuous)
{
long imageSize = src.DataEnd.ToInt64() - src.Data.ToInt64();
if (imageSize < 0)
throw new OpenCvSharpException("The mat has invalid data pointer");
if (imageSize > Int32.MaxValue)
throw new OpenCvSharpException("Too big mat data");
dst.WritePixels(new Int32Rect(0, 0, w, h), src.Data, (int)imageSize, sstep);
return;
}
// Copy one row at a time
try
{
dst.Lock();
//......... part of the code omitted here .........
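A hedged usage sketch of the converter above, assuming it is exposed as a static method of a converter class such as OpenCvSharp.Extensions.WriteableBitmapConverter (the pixel format must match the Mat's channel count, as the checks above require):
using System.Windows.Media;
using System.Windows.Media.Imaging;
using OpenCvSharp;
using OpenCvSharp.Extensions;

static class WriteableBitmapDemo
{
    // Converts a 3-channel BGR Mat into a pre-allocated WriteableBitmap.
    public static WriteableBitmap ToBitmap(Mat mat)
    {
        // Bgr24 has 3 channels, so mat.Channels() must also be 3 to pass the check above.
        var wb = new WriteableBitmap(mat.Width, mat.Height, 96, 96, PixelFormats.Bgr24, null);
        WriteableBitmapConverter.ToWriteableBitmap(mat, wb); // writes into wb, no new bitmap allocated
        return wb;
    }
}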
Example 3: ToMat
/// <summary>
/// Converts System.Drawing.Bitmap to OpenCV Mat.
/// </summary>
/// <param name="src">System.Drawing.Bitmap object to be converted</param>
/// <param name="dst">Mat that receives the conversion result</param>
public static unsafe void ToMat(this Bitmap src, Mat dst)
{
if (src == null)
throw new ArgumentNullException("src");
if (dst == null)
throw new ArgumentNullException("dst");
if (dst.IsDisposed)
throw new ArgumentException("The specified dst is disposed.", "dst");
if (dst.Depth() != MatType.CV_8U)
throw new NotSupportedException("Mat depth != CV_8U");
if (dst.Dims() != 2)
throw new NotSupportedException("Mat dims != 2");
if (src.Width != dst.Width || src.Height != dst.Height)
throw new ArgumentException("src.Size != dst.Size");
int w = src.Width;
int h = src.Height;
Rectangle rect = new Rectangle(0, 0, w, h);
BitmapData bd = null;
try
{
bd = src.LockBits(rect, ImageLockMode.ReadOnly, src.PixelFormat);
byte* p = (byte*)bd.Scan0.ToPointer();
int sstep = bd.Stride;
int offset = sstep - (w / 8);
uint dstep = (uint)dst.Step();
IntPtr dstData = dst.Data;
byte* dstPtr = (byte*)dstData.ToPointer();
bool submat = dst.IsSubmatrix();
bool continuous = dst.IsContinuous();
switch (src.PixelFormat)
{
case PixelFormat.Format1bppIndexed:
{
if (dst.Channels() != 1)
throw new ArgumentException("Invalid nChannels");
if (submat)
throw new NotImplementedException("submatrix not supported");
int x = 0;
int y;
int bytePos;
byte b;
int i;
for (y = 0; y < h; y++)
{
// Each row is padded to a multiple of 4 bytes.
// Walk through every byte of this row.
for (bytePos = 0; bytePos < sstep; bytePos++)
{
if (x < w)
{
// Extract the 8 bits of the byte at the current position
b = p[bytePos];
for (i = 0; i < 8; i++)
{
if (x >= w)
{
break;
}
// IplImage is 8 bits per pixel
dstPtr[dstep * y + x] = ((b & 0x80) == 0x80) ? (byte)255 : (byte)0;
b <<= 1;
x++;
}
}
}
// Move on to the next row
x = 0;
p += sstep;
}
}
break;
case PixelFormat.Format8bppIndexed:
case PixelFormat.Format24bppRgb:
{
if (src.PixelFormat == PixelFormat.Format8bppIndexed)
if (dst.Channels() != 1)
throw new ArgumentException("Invalid nChannels");
if (src.PixelFormat == PixelFormat.Format24bppRgb)
if (dst.Channels() != 3)
throw new ArgumentException("Invalid nChannels");
// If the steps match and the data is continuous, copy it all at once
//......... part of the code omitted here .........
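A hedged usage sketch of this extension, assuming it is available via OpenCvSharp.Extensions; the file name is a placeholder and the destination Mat must be pre-allocated with a matching size and channel count:
using System;
using System.Drawing;
using OpenCvSharp;
using OpenCvSharp.Extensions;

class BitmapToMatDemo
{
    static void Main()
    {
        using (var bmp = new Bitmap("photo.png"))                         // placeholder path, assumed 24bpp RGB
        using (var mat = new Mat(bmp.Height, bmp.Width, MatType.CV_8UC3)) // 3 channels to match 24bpp
        {
            bmp.ToMat(mat);                    // extension method shown above
            Console.WriteLine(mat.Channels()); // prints 3
        }
    }
}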
Example 4: ToMat
/// <summary>
/// Converts BitmapSource to Mat
/// </summary>
/// <param name="src">Input BitmapSource</param>
/// <param name="dst">Output Mat</param>
public static void ToMat(this BitmapSource src, Mat dst)
{
if (src == null)
throw new ArgumentNullException("src");
if (dst == null)
throw new ArgumentNullException("dst");
if (src.PixelWidth != dst.Width || src.PixelHeight != dst.Height)
throw new ArgumentException("size of src must be equal to size of dst");
if (dst.Dims() > 2)
throw new ArgumentException("Mat dimensions must be 2");
int w = src.PixelWidth;
int h = src.PixelHeight;
int bpp = src.Format.BitsPerPixel;
int channels = WriteableBitmapConverter.GetOptimumChannels(src.Format);
if (dst.Channels() != channels)
{
throw new ArgumentException("nChannels of dst is invalid", "dst");
}
bool submat = dst.IsSubmatrix();
bool continuous = dst.IsContinuous();
unsafe
{
byte* p = (byte*)(dst.Data);
long step = dst.Step();
// 1bpp is copied by hand
if (bpp == 1)
{
if (submat)
throw new NotImplementedException("submatrix not supported");
// Copy the BitmapSource data into an array.
// Each element packs 8 horizontal pixels.
int stride = (w / 8) + 1;
byte[] pixels = new byte[h * stride];
src.CopyPixels(pixels, stride, 0);
int x = 0;
for (int y = 0; y < h; y++)
{
int offset = y * stride;
// Walk through every byte of this row
for (int bytePos = 0; bytePos < stride; bytePos++)
{
if (x < w)
{
// Extract the 8 bits of the byte at the current position
byte b = pixels[offset + bytePos];
for (int i = 0; i < 8; i++)
{
if (x >= w)
{
break;
}
p[step * y + x] = ((b & 0x80) == 0x80) ? (byte)255 : (byte)0;
b <<= 1;
x++;
}
}
}
// Move on to the next row
x = 0;
}
}
// 8bpp
/*else if (bpp == 8)
{
int stride = w;
byte[] pixels = new byte[h * stride];
src.CopyPixels(pixels, stride, 0);
for (int y = 0; y < h; y++)
{
for (int x = 0; x < w; x++)
{
p[step * y + x] = pixels[y * stride + x];
}
}
}*/
// 24bpp, 32bpp, ...
else
{
int stride = w * ((bpp + 7) / 8);
if (!submat && continuous)
{
long imageSize = dst.DataEnd.ToInt64() - dst.Data.ToInt64();
//......... part of the code omitted here .........
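A hedged usage sketch, assuming the ToMat extension above is in scope and that the decoded image is Bgr24 (both assumptions; the URI is a placeholder):
using System;
using System.Windows.Media.Imaging;
using OpenCvSharp;

class BitmapSourceToMatDemo
{
    static void Run()
    {
        var bs = new BitmapImage(new Uri("C:/images/photo.png")); // placeholder path
        // The Mat's channel count must match the pixel format; Bgr24 needs 3 channels.
        using (var mat = new Mat(bs.PixelHeight, bs.PixelWidth, MatType.CV_8UC3))
        {
            bs.ToMat(mat);
        }
    }
}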
Example 5: Converter
Mat Converter(Mat colorImage)
{
int channel = colorImage.Channels();
int imageW = colorImage.Width;
int imageH = colorImage.Height;
//colorImage.CvtColor(OpenCvSharp.ColorConversion.BgrToHsv);
Mat grayImage = new Mat(imageH, imageW, MatType.CV_8UC1);
unsafe
{
byte* matPtr = grayImage.DataPointer;
byte* colorPtr = colorImage.DataPointer;
for (int i = 0; i < imageW * imageH; i++)
{
// Average of the first two channels (B and G in a BGR image); computed but not used below.
int red = (*(colorPtr + i * channel) + *(colorPtr + i * channel + 1)) / 2;
// Color compare: a non-zero value in the first channel maps to black, zero maps to white.
if (0 < *(colorPtr + i * channel))
{
*(matPtr + i) = 0;
}
else
{
*(matPtr + i) = 255;
}
}
}
return grayImage;
}
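For reference, the same rule (first channel non-zero maps to 0, zero maps to 255) can be written with built-in calls. This is a hedged alternative sketch, not part of the original example:
Mat ConverterAlternative(Mat colorImage)
{
    // Split into planes and binarize the first channel (B in a BGR image) with an
    // inverted threshold: values > 0 become 0 and values == 0 become 255,
    // matching the manual pointer loop above.
    Mat[] planes = Cv2.Split(colorImage);
    var grayImage = new Mat();
    Cv2.Threshold(planes[0], grayImage, 0, 255, ThresholdType.BinaryInv);
    foreach (var plane in planes)
        plane.Dispose();
    return grayImage;
}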
Example 6: detectBarcode
private static string detectBarcode(string fileName, double thresh, bool debug = false, double rotation = 0)
{
Console.WriteLine("\nProcessing: {0}", fileName);
// load the image and convert it to grayscale
var image = new Mat(fileName);
if (rotation != 0)
{
rotateImage(image, image, rotation, 1);
}
if (debug)
{
Cv2.ImShow("Source", image);
Cv2.WaitKey(1); // do events
}
var gray = new Mat();
var channels = image.Channels();
if (channels > 1)
{
Cv2.CvtColor(image, gray, ColorConversion.BgrToGray);
}
else
{
image.CopyTo(gray);
}
// compute the Scharr gradient magnitude representation of the images
// in both the x and y direction
var gradX = new Mat();
Cv2.Sobel(gray, gradX, MatType.CV_32F, xorder: 1, yorder: 0, ksize: -1);
//Cv2.Scharr(gray, gradX, MatType.CV_32F, xorder: 1, yorder: 0);
var gradY = new Mat();
Cv2.Sobel(gray, gradY, MatType.CV_32F, xorder: 0, yorder: 1, ksize: -1);
//Cv2.Scharr(gray, gradY, MatType.CV_32F, xorder: 0, yorder: 1);
// subtract the y-gradient from the x-gradient
var gradient = new Mat();
Cv2.Subtract(gradX, gradY, gradient);
Cv2.ConvertScaleAbs(gradient, gradient);
if (debug)
{
Cv2.ImShow("Gradient", gradient);
Cv2.WaitKey(1); // do events
}
// blur and threshold the image
var blurred = new Mat();
Cv2.Blur(gradient, blurred, new Size(9, 9));
var threshImage = new Mat();
Cv2.Threshold(blurred, threshImage, thresh, 255, ThresholdType.Binary);
if (debug)
{
Cv2.ImShow("Thresh", threshImage);
Cv2.WaitKey(1); // do events
}
// construct a closing kernel and apply it to the thresholded image
var kernel = Cv2.GetStructuringElement(StructuringElementShape.Rect, new Size(21, 7));
var closed = new Mat();
Cv2.MorphologyEx(threshImage, closed, MorphologyOperation.Close, kernel);
if (debug)
{
Cv2.ImShow("Closed", closed);
Cv2.WaitKey(1); // do events
}
// perform a series of erosions and dilations
Cv2.Erode(closed, closed, null, iterations: 4);
Cv2.Dilate(closed, closed, null, iterations: 4);
if (debug)
{
Cv2.ImShow("Erode & Dilate", closed);
Cv2.WaitKey(1); // do events
}
//find the contours in the thresholded image, then sort the contours
//by their area, keeping only the largest one
Point[][] contours;
HiearchyIndex[] hierarchyIndexes;
Cv2.FindContours(
closed,
out contours,
out hierarchyIndexes,
mode: ContourRetrieval.CComp,
method: ContourChain.ApproxSimple);
if (contours.Length == 0)
{
throw new NotSupportedException("Couldn't find any object in the image.");
}
//......... part of the code omitted here .........
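The omitted remainder typically keeps only the largest contour and takes its rotated bounding box, as the comment above describes. A hedged sketch of that step (not the original omitted code; assumes using System.Linq):
static RotatedRect LargestContourBox(Point[][] contours)
{
    // Sort the contours by area, keep the largest one, and return its
    // minimum-area (rotated) bounding rectangle as the barcode candidate.
    Point[] largest = contours.OrderByDescending(c => Cv2.ContourArea(c)).First();
    return Cv2.MinAreaRect(largest);
}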
Example 7: Update
//......... part of the code omitted here .........
}
fs.Write(nodeName, kinectPts.ToCvMat());
nodeName = "projectorPoints";
Mat projPts = new Mat(1, projectorCoordinates.Count, MatType.CV_64FC2);
for (int i = 0; i < projectorCoordinates.Count; i++)
{
projPts.Set<CvPoint2D64f>(0, i, (CvPoint2D64f)projectorCoordinates[i]);
}
fs.Write(nodeName, projPts.ToCvMat());
fs.Dispose();
}
Debug.Log("Calib Data saved!");
}
if (Input.GetKeyDown(KeyCode.Q))
{
delLastPoints();
}
if (kinect.GetDepthRaw())
{
try
{
Mat src = DoDepthBuffer(kinect.usersDepthMap, KinectWrapper.GetDepthWidth(), KinectWrapper.GetDepthHeight());
dBuffer = src.Clone();
src.ConvertTo(src, OpenCvSharp.CPlusPlus.MatType.CV_8UC1, 255.0f / NUI_IMAGE_DEPTH_MAXIMUM);
Mat show = new Mat(KinectWrapper.GetDepthHeight(), KinectWrapper.GetDepthWidth(), OpenCvSharp.CPlusPlus.MatType.CV_8UC4);
Mat alpha = new Mat(KinectWrapper.GetDepthHeight(), KinectWrapper.GetDepthWidth(), OpenCvSharp.CPlusPlus.MatType.CV_8UC1, new Scalar(255));
Mat[] planes = new Mat[4] { src, src, src, alpha };
Cv2.Merge(planes, show);
//Mat falseColorsMap = new Mat();
//Cv2.ApplyColorMap(src, falseColorsMap, OpenCvSharp.CPlusPlus.ColorMapMode.Rainbow);
//Cv2.ImShow("show", falseColorsMap);
int matSize = (int)show.Total() * show.Channels();
byte[] rColors = new byte[matSize];
Marshal.Copy(show.DataStart, rColors, 0, matSize);
scTex.LoadRawTextureData(rColors);
scTex.Apply(false);
ScreenObject.GetComponent<RawImage>().texture = scTex;
if (showResult)
{
//ResultObject.SetActive(true);
screenTx.SetPixels32(resetPixels);
long discarded = 0;
long drawn = 0;
long bounds = 0;
//Color32[] txcl = (Color32[])resetPixels.Clone();
Color32[] txcl = new Color32[screenTx.height * screenTx.width];
for (int i = 0; i < txcl.Length; i++)
{
Color32 cCol = new Color32(0, 0, 0, 255);
txcl[i] = cCol;
}
screenTx.SetPixels32(txcl, 0);
Color32 sccolor = Color.white;
for (int i = 0; i < show.Rows; i += 5)
{
for (int j = 0; j < show.Cols; j += 5)
{
CvPoint3D64f realVal = NuiTransformDepthImageToSkeleton((long)j, (long)i, dBuffer.Get<ushort>((int)i, (int)j));
if (realVal.Z < projThresh && realVal.Z > 1.0)
{
CvPoint2D64f scCoord = convertKinectToProjector(realVal);
if (scCoord.X > 0.0 && scCoord.X < Screen.width && scCoord.Y > 0.0 && scCoord.Y < Screen.height)
{
//Debug.Log(scCoord.X.ToString() + " " + scCoord.Y.ToString());