

C# Mat.Size Method Code Examples

This article collects typical usage examples of the Mat.Size method in C#. If you are wondering how to use C# Mat.Size in practice, or are looking for concrete examples of it, the hand-picked code samples below may help. You can also explore further usage examples of the Mat class that this method belongs to.


Ten code examples of the Mat.Size method are shown below, sorted by popularity by default.
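
Before going through the examples, here is a minimal sketch of what Mat.Size() returns and how it is typically used to allocate a destination image with the same dimensions as a source image. The file name is a placeholder, and the snippet assumes the same older OpenCvSharp API (LoadMode and friends) used in the examples below.

using System;
using OpenCvSharp;

class MatSizeDemo
{
    static void Main()
    {
        // "lenna.png" is a placeholder; any image on disk will do.
        using (var src = new Mat("lenna.png", LoadMode.Color))
        {
            Size size = src.Size();  // width and height of the matrix as a Size struct
            Console.WriteLine("{0} x {1}", size.Width, size.Height);

            // Size() is most often used to allocate a destination Mat
            // with the same dimensions as the source:
            using (var dst = new Mat(src.Size(), MatType.CV_8UC3, Scalar.All(0)))
            {
                Console.WriteLine(dst.Rows == src.Rows && dst.Cols == src.Cols);  // True
            }
        }
    }
}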

Example 1: Run

        public void Run()
        {
            var src = new Mat(FilePath.Lenna, LoadMode.Color);
            var gray = new Mat(FilePath.Lenna, LoadMode.GrayScale);
            var dst = new Mat(src.Size(), MatType.CV_8UC3, Scalar.All(0));

            StarDetector detector = new StarDetector(45);
            KeyPoint[] keypoints = detector.Run(gray);

            if (keypoints != null)
            {
                var color = new Scalar(0, 255, 0);
                foreach (KeyPoint kpt in keypoints)
                {
                    float r = kpt.Size / 2;
                    Cv2.Circle(dst, kpt.Pt, (int)r, color, 1, LineType.Link8, 0);
                    Cv2.Line(dst, 
                        new Point2f(kpt.Pt.X + r, kpt.Pt.Y + r), 
                        new Point2f(kpt.Pt.X - r, kpt.Pt.Y - r), 
                        color, 1, LineType.Link8, 0);
                    Cv2.Line(dst, 
                        new Point2f(kpt.Pt.X - r, kpt.Pt.Y + r), 
                        new Point2f(kpt.Pt.X + r, kpt.Pt.Y - r), 
                        color, 1, LineType.Link8, 0);
                }
            }

            using (new Window("img", src))
            using (new Window("StarDetector features", dst))
            {
                Cv.WaitKey();
            }
        }
Developer ID: jorik041, Project: opencvsharp, Lines of code: 33, Source file: StarDetectorSample.cs

Example 2: Niblack

#if LANG_JP
        /// <summary>
        /// Niblackの手法による二値化処理を行う。
        /// </summary>
        /// <param name="imgSrc">入力画像</param>
        /// <param name="imgDst">出力画像</param>
        /// <param name="kernelSize">局所領域のサイズ</param>
        /// <param name="k">係数</param>
#else
        /// <summary>
        /// Binarizes by Niblack's method
        /// </summary>
        /// <param name="src">Input image</param>
        /// <param name="dst">Output image</param>
        /// <param name="kernelSize">Window size</param>
        /// <param name="k">Adequate coefficient</param>
#endif
        public static void Niblack(Mat src, Mat dst, int kernelSize, double k)
        {
            if (src == null)
                throw new ArgumentNullException("src");
            if (dst == null)
                throw new ArgumentNullException("dst");

            // grayscale only
            if (src.Type() != MatType.CV_8UC1)
                throw new ArgumentException("src must be gray scale image");
            if (dst.Type() != MatType.CV_8UC1)
                throw new ArgumentException("dst must be gray scale image");

            // check the kernel size
            if (kernelSize < 3)
                throw new ArgumentOutOfRangeException("kernelSize", "size must be 3 and above");
            if (kernelSize % 2 == 0)
                throw new ArgumentOutOfRangeException("kernelSize", "size must be odd number");

            int width = src.Width;
            int height = src.Height;
            dst.Create(src.Size(), src.Type());

            using (var tSrcMat = new MatOfByte(src))
            using (var tDstMat = new MatOfByte(dst))
            {
                var tSrc = tSrcMat.GetIndexer();
                var tDst = tDstMat.GetIndexer();

                //for (int y = 0; y < gray.Height; y++)
                MyParallel.For(0, height, delegate(int y)
                {
                    for (int x = 0; x < width; x++)
                    {
                        double m, s;
                        MeanStddev(src, x, y, kernelSize, out m, out s);
                        double threshold = m + k * s;

                        if (tSrc[y, x] < threshold)
                            tDst[y, x] = 0;
                        else
                            tDst[y, x] = 255;
                    }
                }
                );
            }
        }
Developer ID: JiphuTzu, Project: opencvsharp, Lines of code: 63, Source file: Binarizer.cs
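
Example 2 calls a MeanStddev helper that is not included in the snippet above. As a rough, hypothetical reconstruction (not the project's actual code), such a helper could compute the local mean and standard deviation over a kernelSize x kernelSize window clipped at the image border:

        // Hypothetical helper assumed by Example 2; not taken from the original project.
        private static void MeanStddev(Mat src, int x, int y, int kernelSize,
                                       out double mean, out double stddev)
        {
            int half = kernelSize / 2;
            double sum = 0.0, sqSum = 0.0;
            int count = 0;

            for (int yy = y - half; yy <= y + half; yy++)
            {
                for (int xx = x - half; xx <= x + half; xx++)
                {
                    // Ignore window pixels that fall outside the image.
                    if (xx < 0 || yy < 0 || xx >= src.Cols || yy >= src.Rows)
                        continue;
                    double v = src.At<byte>(yy, xx);
                    sum += v;
                    sqSum += v * v;
                    count++;
                }
            }

            mean = sum / count;
            double variance = sqSum / count - mean * mean;
            stddev = Math.Sqrt(Math.Max(variance, 0.0));
        }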

Example 3: Run

        public void Run()
        {
            using (Mat src = new Mat(FilePath.Distortion, LoadMode.Color))
            using (Mat gray = new Mat(src.Size(), MatType.CV_8UC1))
            using (Mat dst = src.Clone())
            {
                Cv2.CvtColor(src, gray, ColorConversion.BgrToGray);

                CppStyleMSER(gray, dst);  // C++ style

                using (new Window("MSER src", src))
                using (new Window("MSER gray", gray))
                using (new Window("MSER dst", dst))
                {
                    Cv.WaitKey();
                }
            }
        }
Developer ID: jorik041, Project: opencvsharp, Lines of code: 18, Source file: MSERSample.cs

Example 4: DetectFace

        public ActionResult DetectFace(HttpPostedFileBase imageData)
        {
            try
            {
                if (imageData == null) { throw new ArgumentException("File does not exist."); }

                using (var img = Mat.FromStream(imageData.InputStream, LoadMode.Color))
                {
                    var ExecutingAssemblyPath = new Uri(Path.GetDirectoryName(System.Reflection.Assembly.GetExecutingAssembly().CodeBase.ToString())).LocalPath;
                    double scale = 2.0;
                    using (var gray = new Mat())
                    using (var smallImg = new Mat((int)(img.Rows / scale), (int)(img.Cols / scale), MatType.CV_8UC1))
                    {
                        Cv2.CvtColor(img, gray, ColorConversion.BgrToGray);
                        Cv2.Resize(gray, smallImg, smallImg.Size(), 0, 0, Interpolation.Linear);
                        Cv2.EqualizeHist(smallImg, smallImg);

                        byte[] imgBytes = img.ToBytes(".png");
                        string base64Img = Convert.ToBase64String(imgBytes);
                        ViewBag.Base64Img = base64Img;

                        var obj = new CascadeClassifier();
                        var cascadeFilePath = Path.Combine(ExecutingAssemblyPath, "Content\\haarcascade_frontalface_alt.xml");
                        if (!obj.Load(cascadeFilePath)) { throw new InvalidOperationException("Failed to load classifier file."); }

                        var rects = obj.DetectMultiScale(smallImg);

                        var nestedObj = new CascadeClassifier();
                        var nestedCascadeFilePath = Path.Combine(ExecutingAssemblyPath, "Content\\haarcascade_eye.xml");
                        //var nestedCascadeFilePath = Path.Combine(ExecutingAssemblyPath, "Content\\haarcascade_eye_tree_eyeglasses.xml");
                        if (!nestedObj.Load(nestedCascadeFilePath)) { throw new InvalidOperationException("Failed to load classifier file."); }
                        foreach(var rect in rects)
                        {
                            Point faceCenter;
                            faceCenter.X = (int)((rect.X + rect.Width * 0.5) * scale);
                            faceCenter.Y = (int)((rect.Y + rect.Height * 0.5) * scale);
                            int faceRadius = (int)((rect.Width + rect.Height) * 0.25 * scale);
                            Cv2.Circle(img, faceCenter, faceRadius, new Scalar(80, 80, 255), 3, LineType.Link8, 0);

                            Mat smallImgROI = new Mat(smallImg, rect);
                            var nestedRects = nestedObj.DetectMultiScale(smallImgROI);

                            foreach(var nestedRect in nestedRects)
                            {
                                Point center;
                                center.X = (int)((rect.X + nestedRect.X + nestedRect.Width * 0.5) * scale);
                                center.Y = (int)((rect.Y +  nestedRect.Y + nestedRect.Height * 0.5) * scale);
                                int radius = (int)((nestedRect.Width + nestedRect.Height) * 0.25 * scale);
                                Cv2.Circle(img, center, radius, new Scalar(80, 255, 80), 3, LineType.Link8, 0);
                            }
                        }

                        byte[] resultBytes = img.ToBytes(".png");
                        string base64Result = Convert.ToBase64String(resultBytes);
                        ViewBag.Base64OrgResult = base64Result;

                        byte[] grayBytes = gray.ToBytes(".png");
                        string base64Gray = Convert.ToBase64String(grayBytes);
                        ViewBag.Base64Gray = base64Gray;

                        byte[] smallBytes = smallImg.ToBytes(".png");
                        string base64Small = Convert.ToBase64String(smallBytes);
                        ViewBag.Base64Small = base64Small;
                    }
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.ToString());
            }

            return View();
        }
Developer ID: takuya1981, Project: OpenCVSample, Lines of code: 73, Source file: HomeController.cs

Example 5: Dft

        public static void Dft(string path)
        {
            Mat img = Cv2.ImRead(path, LoadMode.GrayScale);

            // expand input image to optimal size
            Mat padded = new Mat();
            int m = Cv2.GetOptimalDFTSize(img.Rows);
            int n = Cv2.GetOptimalDFTSize(img.Cols); // on the border add zero values
            Cv2.CopyMakeBorder(img, padded, 0, m - img.Rows, 0, n - img.Cols, BorderType.Constant, Scalar.All(0));

            // Add to the expanded another plane with zeros
            Mat paddedF32 = new Mat();
            padded.ConvertTo(paddedF32, MatType.CV_32F);
            Mat[] planes = { paddedF32, Mat.Zeros(padded.Size(), MatType.CV_32F) };
            Mat complex = new Mat();
            Cv2.Merge(planes, complex);

            // this way the result may fit in the source matrix
            Mat dft = new Mat();
            Cv2.Dft(complex, dft);

            // compute the magnitude and switch to logarithmic scale
            // => log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2))
            Mat[] dftPlanes;
            Cv2.Split(dft, out dftPlanes);  // dftPlanes[0] = Re(DFT(I)), dftPlanes[1] = Im(DFT(I))

            // magnitude = sqrt(Re(DFT(I))^2 + Im(DFT(I))^2)
            Mat magnitude = new Mat();
            Cv2.Magnitude(dftPlanes[0], dftPlanes[1], magnitude);

            magnitude += Scalar.All(1);  // switch to logarithmic scale
            Cv2.Log(magnitude, magnitude);

            // crop the spectrum, if it has an odd number of rows or columns
            Mat spectrum = magnitude[
                new Rect(0, 0, magnitude.Cols & -2, magnitude.Rows & -2)];

            // rearrange the quadrants of Fourier image  so that the origin is at the image center
            int cx = spectrum.Cols / 2;
            int cy = spectrum.Rows / 2;

            Mat q0 = new Mat(spectrum, new Rect(0, 0, cx, cy));   // Top-Left - Create a ROI per quadrant
            Mat q1 = new Mat(spectrum, new Rect(cx, 0, cx, cy));  // Top-Right
            Mat q2 = new Mat(spectrum, new Rect(0, cy, cx, cy));  // Bottom-Left
            Mat q3 = new Mat(spectrum, new Rect(cx, cy, cx, cy)); // Bottom-Right

            // swap quadrants (Top-Left with Bottom-Right)
            Mat tmp = new Mat();
            q0.CopyTo(tmp);
            q3.CopyTo(q0);
            tmp.CopyTo(q3);

            // swap quadrant (Top-Right with Bottom-Left)
            q1.CopyTo(tmp);
            q2.CopyTo(q1);
            tmp.CopyTo(q2);

            // Transform the matrix with float values into a viewable image form (floats between 0 and 1)
            Cv2.Normalize(spectrum, spectrum, 0, 1, NormType.MinMax);

            // Show the result
            Cv2.ImShow("Spectrum Magnitude", spectrum);
            Cv2.WaitKey(0);
            Cv2.DestroyAllWindows();
        }
Developer ID: Muraad, Project: DynamoOpenCV, Lines of code: 65, Source file: OpenCv.cs

Example 6: NiblackFast

#if LANG_JP
        /// <summary>
        /// Niblackの手法による二値化処理を行う(高速だが、メモリを多く消費するバージョン)。
        /// </summary>
        /// <param name="imgSrc">入力画像</param>
        /// <param name="imgDst">出力画像</param>
        /// <param name="kernelSize">局所領域のサイズ</param>
        /// <param name="k">係数</param>
#else
        /// <summary>
        /// Binarizes by Niblack's method (This is faster but memory-hogging)
        /// </summary>
        /// <param name="src">Input image</param>
        /// <param name="dst">Output image</param>
        /// <param name="kernelSize">Window size</param>
        /// <param name="k">Adequate coefficient</param>
#endif
        public static void NiblackFast(Mat src, Mat dst, int kernelSize, double k)
        {
            if (src == null)
                throw new ArgumentNullException("src");
            if (dst == null)
                throw new ArgumentNullException("dst");

            // grayscale only
            if (src.Type() != MatType.CV_8UC1)
                throw new ArgumentException("src must be gray scale image");
            if (dst.Type() != MatType.CV_8UC1)
                throw new ArgumentException("dst must be gray scale image");

            // check the kernel size
            if (kernelSize < 3)
                throw new ArgumentOutOfRangeException("kernelSize", "size must be 3 and above");
            if (kernelSize % 2 == 0)
                throw new ArgumentOutOfRangeException("kernelSize", "size must be odd number");

            int borderSize = kernelSize / 2;
            int width = src.Width;
            int height = src.Height;
            dst.Create(src.Size(), src.Type());

            using (var tempMat = new Mat(height + (borderSize * 2), width + (borderSize * 2), src.Type()))
            using (var sumMat = new Mat(tempMat.Height + 1, tempMat.Width + 1, MatType.CV_64FC1, 1))
            using (var sqSumMat = new Mat(tempMat.Height + 1, tempMat.Width + 1, MatType.CV_64FC1, 1))
            {
                Cv2.CopyMakeBorder(src, tempMat, borderSize, borderSize, borderSize, borderSize, BorderTypes.Replicate, Scalar.All(0));
                Cv2.Integral(tempMat, sumMat, sqSumMat);

                using (var tSrcMat = new MatOfByte(src))
                using (var tDstMat = new MatOfByte(dst))
                using (var tSumMat = new MatOfDouble(sumMat))
                using (var tSqSumMat = new MatOfDouble(sqSumMat))
                {
                    var tSrc = tSrcMat.GetIndexer();
                    var tDst = tDstMat.GetIndexer();
                    var tSum = tSumMat.GetIndexer();
                    var tSqSum = tSqSumMat.GetIndexer();

                    int ylim = height + borderSize;
                    int xlim = width + borderSize;
                    int kernelPixels = kernelSize * kernelSize;
                    for (int y = borderSize; y < ylim; y++)
                    {
                        for (int x = borderSize; x < xlim; x++)
                        {
                            int x1 = x - borderSize;
                            int y1 = y - borderSize;
                            int x2 = x + borderSize + 1;
                            int y2 = y + borderSize + 1;
                            double sum = tSum[y2, x2] - tSum[y2, x1] - tSum[y1, x2] + tSum[y1, x1];
                            double sqsum = tSqSum[y2, x2] - tSqSum[y2, x1] - tSqSum[y1, x2] + tSqSum[y1, x1];
                            double mean = sum / kernelPixels;
                            double var = (sqsum / kernelPixels) - (mean * mean);
                            if (var < 0.0) var = 0.0;
                            double stddev = Math.Sqrt(var);

                            double threshold = mean + k * stddev;
                            if (tSrc[y - borderSize, x - borderSize] < threshold)
                                tDst[y - borderSize, x - borderSize] = 0;
                            else
                                tDst[y - borderSize, x - borderSize] = 255;
                        }
                    }
                }
            }
        }
Developer ID: JiphuTzu, Project: opencvsharp, Lines of code: 85, Source file: Binarizer.cs

Example 7: Bernsen

#if LANG_JP
        /// <summary>
        /// Bernsenの手法による二値化処理を行う。
        /// </summary>
        /// <param name="imgSrc">入力画像</param>
        /// <param name="imgDst">出力画像</param>
        /// <param name="kernelSize">局所領域のサイズ</param>
        /// <param name="constrastMin">この数値以下のコントラストの領域は背景領域とみなす</param>
        /// <param name="bgThreshold">背景領域と見なされた領域に適用する閾値(背景領域以外では、適応的に閾値を求める)</param>
#else
        /// <summary>
        /// Binarizes by Bernsen's method
        /// </summary>
        /// <param name="src">Input image</param>
        /// <param name="dst">Output image</param>
        /// <param name="kernelSize">Window size</param>
        /// <param name="constrastMin">Adequate coefficient</param>
        /// <param name="bgThreshold">Adequate coefficient</param>
#endif
        public static void Bernsen(Mat src, Mat dst, int kernelSize, byte constrastMin, byte bgThreshold)
        {
            if (src == null)
                throw new ArgumentNullException("src");
            if (dst == null)
                throw new ArgumentNullException("dst");

            // grayscale only
            if (src.Type() != MatType.CV_8UC1)
                throw new ArgumentException("src must be gray scale image");
            if (dst.Type() != MatType.CV_8UC1)
                throw new ArgumentException("dst must be gray scale image");

            // check the kernel size
            if (kernelSize < 3)
                throw new ArgumentOutOfRangeException("kernelSize", "size must be 3 and above");
            if (kernelSize % 2 == 0)
                throw new ArgumentOutOfRangeException("kernelSize", "size must be odd number");

            int width = src.Width;
            int height = src.Height;
            dst.Create(src.Size(), src.Type());

            using (var tSrcMat = new MatOfByte(src))
            using (var tDstMat = new MatOfByte(dst))
            {
                var tSrc = tSrcMat.GetIndexer();
                var tDst = tDstMat.GetIndexer();

                for (int y = 0; y < height; y++)
                {
                    for (int x = 0; x < width; x++)
                    {
                        byte min, max;
                        MinMax(src, x, y, kernelSize, out min, out max);

                        int contrast = max - min;
                        byte threshold;
                        if (contrast < constrastMin)
                            threshold = bgThreshold;
                        else
                            threshold = (byte)((max + min) / 2);

                        if (tSrc[y, x] <= threshold)
                            tDst[y, x] = 0;
                        else
                            tDst[y, x] = 255;
                    }
                }
            }

        }
Developer ID: JiphuTzu, Project: opencvsharp, Lines of code: 70, Source file: Binarizer.cs
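
Like Example 2, the Bernsen example relies on a MinMax helper that is not part of the snippet. A minimal hypothetical sketch (again, not the project's actual implementation) that finds the darkest and brightest pixels inside the local window could look like this:

        // Hypothetical helper assumed by Example 7; not taken from the original project.
        private static void MinMax(Mat src, int x, int y, int kernelSize,
                                   out byte min, out byte max)
        {
            int half = kernelSize / 2;
            min = byte.MaxValue;
            max = byte.MinValue;

            for (int yy = y - half; yy <= y + half; yy++)
            {
                for (int xx = x - half; xx <= x + half; xx++)
                {
                    // Ignore window pixels that fall outside the image.
                    if (xx < 0 || yy < 0 || xx >= src.Cols || yy >= src.Rows)
                        continue;
                    byte v = src.At<byte>(yy, xx);
                    if (v < min) min = v;
                    if (v > max) max = v;
                }
            }
        }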

Example 8: watershedExample

        /// <summary>
        /// https://github.com/Itseez/opencv_extra/blob/master/learning_opencv_v2/ch9_watershed.cpp
        /// </summary>
        private static void watershedExample()
        {
            var src = new Mat(@"..\..\Images\corridor.jpg", LoadMode.AnyDepth | LoadMode.AnyColor);
            var srcCopy = new Mat();
            src.CopyTo(srcCopy);

            var markerMask = new Mat();
            Cv2.CvtColor(srcCopy, markerMask, ColorConversion.BgrToGray);

            var imgGray = new Mat();
            Cv2.CvtColor(markerMask, imgGray, ColorConversion.GrayToBgr);
            markerMask = new Mat(markerMask.Size(), markerMask.Type(), s: Scalar.All(0));

            var sourceWindow = new Window("Source (Select areas by mouse and then press space)")
            {
                Image = srcCopy
            };

            var previousPoint = new Point(-1, -1);
            sourceWindow.OnMouseCallback += (@event, x, y, flags) =>
            {
                if (x < 0 || x >= srcCopy.Cols || y < 0 || y >= srcCopy.Rows)
                {
                    return;
                }

                if (@event == MouseEvent.LButtonUp || !flags.HasFlag(MouseEvent.FlagLButton))
                {
                    previousPoint = new Point(-1, -1);
                }
                else if (@event == MouseEvent.LButtonDown)
                {
                    previousPoint = new Point(x, y);
                }
                else if (@event == MouseEvent.MouseMove && flags.HasFlag(MouseEvent.FlagLButton))
                {
                    var pt = new Point(x, y);
                    if (previousPoint.X < 0)
                    {
                        previousPoint = pt;
                    }

                    Cv2.Line(img: markerMask, pt1: previousPoint, pt2: pt, color: Scalar.All(255), thickness: 5);
                    Cv2.Line(img: srcCopy, pt1: previousPoint, pt2: pt, color: Scalar.All(255), thickness: 5);
                    previousPoint = pt;
                    sourceWindow.Image = srcCopy;
                }
            };

            var rnd = new Random();

            for (; ; )
            {
                var key = Cv2.WaitKey(0);

                if ((char)key == 27) // ESC
                {
                    break;
                }

                if ((char)key == 'r') // Reset
                {
                    markerMask = new Mat(markerMask.Size(), markerMask.Type(), s: Scalar.All(0));
                    src.CopyTo(srcCopy);
                    sourceWindow.Image = srcCopy;
                }

                if ((char)key == 'w' || (char)key == ' ') // Apply watershed
                {
                    Point[][] contours; //vector<vector<Point>> contours;
                    HiearchyIndex[] hierarchyIndexes; //vector<Vec4i> hierarchy;
                    Cv2.FindContours(
                        markerMask,
                        out contours,
                        out hierarchyIndexes,
                        mode: ContourRetrieval.CComp,
                        method: ContourChain.ApproxSimple);

                    if (contours.Length == 0)
                    {
                        continue;
                    }

                    var markers = new Mat(markerMask.Size(), MatType.CV_32S, s: Scalar.All(0));

                    var componentCount = 0;
                    var contourIndex = 0;
                    while ((contourIndex >= 0))
                    {
                        Cv2.DrawContours(
                            markers,
                            contours,
                            contourIndex,
                            color: Scalar.All(componentCount+1),
                            thickness: -1,
                            lineType: LineType.Link8,
                            hierarchy: hierarchyIndexes,
//......... (remainder of this code omitted) .........
Developer ID: kauser-cse-buet, Project: OpenCVSharp-Samples, Lines of code: 101, Source file: Program.cs

Example 9: rotateImage

 private static void rotateImage(Mat src, Mat dst, double angle, double scale)
 {
     var imageCenter = new Point2f(src.Cols / 2f, src.Rows / 2f);
     var rotationMat = Cv2.GetRotationMatrix2D(imageCenter, angle, scale);
     Cv2.WarpAffine(src, dst, rotationMat, src.Size());
 }
Developer ID: kauser-cse-buet, Project: OpenCVSharp-Samples, Lines of code: 6, Source file: Program.cs

Example 10: Main

        static void Main(string[] args)
        {
            var srcImage = new Mat(@"..\..\Images\cvlbl.png");
            Cv2.ImShow("Source", srcImage);
            Cv2.WaitKey(1); // do events

            var binaryImage = new Mat(srcImage.Size(), MatType.CV_8UC1);

            Cv2.CvtColor(srcImage, binaryImage, ColorConversion.BgrToGray);
            Cv2.Threshold(binaryImage, binaryImage, thresh: 100, maxval: 255, type: ThresholdType.Binary);

            var detectorParams = new SimpleBlobDetector.Params
            {
                //MinDistBetweenBlobs = 10, // 10 pixels between blobs
                //MinRepeatability = 1,

                //MinThreshold = 100,
                //MaxThreshold = 255,
                //ThresholdStep = 5,

                FilterByArea = false,
                //FilterByArea = true,
                //MinArea = 0.001f, // 10 pixels squared
                //MaxArea = 500,

                FilterByCircularity = false,
                //FilterByCircularity = true,
                //MinCircularity = 0.001f,

                FilterByConvexity = false,
                //FilterByConvexity = true,
                //MinConvexity = 0.001f,
                //MaxConvexity = 10,

                FilterByInertia = false,
                //FilterByInertia = true,
                //MinInertiaRatio = 0.001f,

                FilterByColor = false
                //FilterByColor = true,
                //BlobColor = 255 // to extract light blobs
            };
            var simpleBlobDetector = new SimpleBlobDetector(detectorParams);
            var keyPoints = simpleBlobDetector.Detect(binaryImage);

            Console.WriteLine("keyPoints: {0}", keyPoints.Length);
            foreach (var keyPoint in keyPoints)
            {
                Console.WriteLine("X: {0}, Y: {1}", keyPoint.Pt.X, keyPoint.Pt.Y);
            }

            var imageWithKeyPoints = new Mat();
            Cv2.DrawKeypoints(
                    image: binaryImage,
                    keypoints: keyPoints,
                    outImage: imageWithKeyPoints,
                    color: Scalar.FromRgb(255, 0, 0),
                    flags: DrawMatchesFlags.DrawRichKeypoints);

            Cv2.ImShow("Key Points", imageWithKeyPoints);
            Cv2.WaitKey(1); // do events

            Cv2.WaitKey(0);

            Cv2.DestroyAllWindows();
            srcImage.Dispose();
            imageWithKeyPoints.Dispose();
        }
Developer ID: kauser-cse-buet, Project: OpenCVSharp-Samples, Lines of code: 68, Source file: Program.cs


Note: The Mat.Size method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; the copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's License. Do not reproduce without permission.