

C# Mat.put Method Code Examples

This article collects typical usage examples of the Mat.put method in C#. If you have been wondering what Mat.put does, how to call it, or what real-world uses look like, the hand-picked code samples below should help. You can also explore further usage examples of the Mat class that the method belongs to.


Below are 15 code examples of the Mat.put method, sorted by popularity by default.
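Before the examples, here is a minimal sketch of the method itself. Mat.put writes an array of values into a Mat starting at a given (row, col) position, with overloads taking byte[], int[], float[], double[] or a params double[] list to match the Mat's CvType; Mat.get is the inverse operation. The snippet below is a hypothetical standalone sketch (class and variable names are illustrative), assuming the OpenCVForUnity API used throughout the examples:

using OpenCVForUnity;
using UnityEngine;

public static class MatPutSketch
{
    public static void Demo ()
    {
        // a 2x3 single-channel float matrix
        Mat m = new Mat (2, 3, CvType.CV_32FC1);

        // bulk write: copies the whole array row-major, starting at (0, 0)
        float[] data = { 1, 2, 3, 4, 5, 6 };
        m.put (0, 0, data);

        // single-element write via the params double[] overload
        m.put (1, 2, 42.0);

        // read back: get () returns a double[] with one value per channel
        double v = m.get (1, 2) [0];
        Debug.Log ("m(1,2) = " + v);
    }
}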

Example 1: setHsvColor

		public void setHsvColor (Scalar hsvColor)
		{
				double minH = (hsvColor.val [0] >= mColorRadius.val [0]) ? hsvColor.val [0] - mColorRadius.val [0] : 0;
				double maxH = (hsvColor.val [0] + mColorRadius.val [0] <= 255) ? hsvColor.val [0] + mColorRadius.val [0] : 255;
		
				mLowerBound.val [0] = minH;
				mUpperBound.val [0] = maxH;
		
				mLowerBound.val [1] = hsvColor.val [1] - mColorRadius.val [1];
				mUpperBound.val [1] = hsvColor.val [1] + mColorRadius.val [1];
		
				mLowerBound.val [2] = hsvColor.val [2] - mColorRadius.val [2];
				mUpperBound.val [2] = hsvColor.val [2] + mColorRadius.val [2];
		
				mLowerBound.val [3] = 0;
				mUpperBound.val [3] = 255;
		
				Mat spectrumHsv = new Mat (1, (int)(maxH - minH), CvType.CV_8UC3);
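				// fill this 1-pixel-high strip with every hue in [minH, maxH) at full
				// saturation/value; cvtColor below turns it into an RGB spectrum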
		
				for (int j = 0; j < maxH-minH; j++) {
						byte[] tmp = {(byte)(minH + j), (byte)255, (byte)255};
						spectrumHsv.put (0, j, tmp);
				}
		
				Imgproc.cvtColor (spectrumHsv, mSpectrum, Imgproc.COLOR_HSV2RGB_FULL, 4);
		}
Developer: lsewata, Project: OpenCVForUnity, Lines: 26, Source: ColorBlobDetector.cs

Example 2: Start

        // Use this for initialization
        void Start()
        {
            Texture2D inputTexture = Resources.Load ("lena") as Texture2D;

                        Mat inputMat = new Mat (inputTexture.height, inputTexture.width, CvType.CV_8UC4);

                        Utils.texture2DToMat (inputTexture, inputMat);
                        Debug.Log ("inputMat ToString " + inputMat.ToString ());

                        Mat src_mat = new Mat (4, 1, CvType.CV_32FC2);
                        Mat dst_mat = new Mat (4, 1, CvType.CV_32FC2);

                        src_mat.put (0, 0, 0.0, 0.0, inputMat.rows (), 0.0, 0.0, inputMat.cols (), inputMat.rows (), inputMat.cols ());
                        dst_mat.put (0, 0, 0.0, 0.0, inputMat.rows (), 200.0, 0.0, inputMat.cols (), inputMat.rows (), inputMat.cols () - 200.0);
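                        // note: rows () and cols () are used interchangeably as x/y coordinates
                        // above, which only produces a sensible warp because lena is square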
                        Mat perspectiveTransform = Imgproc.getPerspectiveTransform (src_mat, dst_mat);

                        Mat outputMat = inputMat.clone ();

                        Imgproc.warpPerspective (inputMat, outputMat, perspectiveTransform, new Size (inputMat.rows (), inputMat.cols ()));

                        Texture2D outputTexture = new Texture2D (outputMat.cols (), outputMat.rows (), TextureFormat.RGBA32, false);

                        Utils.matToTexture2D (outputMat, outputTexture);

                        gameObject.GetComponent<Renderer> ().material.mainTexture = outputTexture;
        }
Developer: ygx2011, Project: OpenCVForUnity, Lines: 27, Source: WrapPerspectiveSample.cs

Example 3: Start

        // Use this for initialization
        void Start()
        {
            // Data for visual representation
                        int width = 512, height = 512;
                        Mat image = Mat.zeros (height, width, CvType.CV_8UC4);

                        // Set up training data
                        int[] labels = {1, -1, -1, -1};
                        float[] trainingData = { 501, 10, 255, 10, 501, 255, 10, 501 };
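                        // trainingData holds 4 samples x 2 features in row-major order; the
                        // labels Mat below uses the int[] overload of put to fill a CV_32SC1 column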
                        Mat trainingDataMat = new Mat (4, 2, CvType.CV_32FC1);
                        trainingDataMat.put (0, 0, trainingData);
                        Mat labelsMat = new Mat (4, 1, CvType.CV_32SC1);
                        labelsMat.put (0, 0, labels);

                        // Train the SVM
                        SVM svm = SVM.create ();
                        svm.setType (SVM.C_SVC);
                        svm.setKernel (SVM.LINEAR);
                        svm.setTermCriteria (new TermCriteria (TermCriteria.MAX_ITER, 100, 1e-6));
                        svm.train (trainingDataMat, Ml.ROW_SAMPLE, labelsMat);

                        // Show the decision regions given by the SVM
                        byte[] green = {0,255,0,255};
                        byte[] blue = {0,0,255,255};
                        for (int i = 0; i < image.rows(); ++i)
                                for (int j = 0; j < image.cols(); ++j) {
                                        Mat sampleMat = new Mat (1, 2, CvType.CV_32FC1);
                                        sampleMat.put (0, 0, j, i);

                                        float response = svm.predict (sampleMat);
                                        if (response == 1)
                                                image.put (i, j, green);
                                        else if (response == -1)
                                                image.put (i, j, blue);
                                }

                        // Show the training data
                        int thickness = -1;
                        int lineType = 8;

                        Imgproc.circle (image, new Point (501, 10), 5, new Scalar (0, 0, 0, 255), thickness, lineType, 0);
                        Imgproc.circle (image, new Point (255, 10), 5, new Scalar (255, 255, 255, 255), thickness, lineType, 0);
                        Imgproc.circle (image, new Point (501, 255), 5, new Scalar (255, 255, 255, 255), thickness, lineType, 0);
                        Imgproc.circle (image, new Point (10, 501), 5, new Scalar (255, 255, 255, 255), thickness, lineType, 0);

                        // Show support vectors
                        thickness = 2;
                        lineType = 8;
                        Mat sv = svm.getUncompressedSupportVectors ();
                        //						Debug.Log ("sv.ToString() " + sv.ToString ());
                        //						Debug.Log ("sv.dump() " + sv.dump ());
                        for (int i = 0; i < sv.rows(); ++i) {
                                Imgproc.circle (image, new Point ((int)sv.get (i, 0) [0], (int)sv.get (i, 1) [0]), 6, new Scalar (128, 128, 128, 255), thickness, lineType, 0);
                        }

                        Texture2D texture = new Texture2D (image.width (), image.height (), TextureFormat.RGBA32, false);
                        Utils.matToTexture2D (image, texture);
                        gameObject.GetComponent<Renderer> ().material.mainTexture = texture;
        }
Developer: ygx2011, Project: OpenCVForUnity, Lines: 60, Source: SVMSample.cs

Example 4: rotate

		/// <summary>
		/// Rotates the specified inMat 90 degrees clockwise.
		/// Assumes a square Mat, since the output keeps the input's dimensions.
		/// </summary>
		/// <param name="inMat">Input mat.</param>
		public static Mat rotate (Mat inMat)
		{
				byte[] b = new byte[1];

				Mat outMat = new Mat ();
				inMat.copyTo (outMat);
				for (int i=0; i<inMat.rows(); i++) {
						for (int j=0; j<inMat.cols(); j++) {
								inMat.get (inMat.cols () - j - 1, i, b);
								outMat.put (i, j, b);

						}
				}
				return outMat;
		}
Developer: wlstks7, Project: MarkerBasedARSample, Lines: 19, Source: Marker.cs

Example 5: read

		public void read (object root_json)
		{
				IDictionary pmodel_json = (IDictionary)root_json;
		
				IDictionary P_json = (IDictionary)pmodel_json ["P"];
				P = new Mat ((int)(long)P_json ["rows"], (int)(long)P_json ["cols"], CvType.CV_32F);
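				// the JSON layer (presumably MiniJSON-style) boxes numbers as long/double,
				// hence the (int)(long) and (float)(double) double casts in this method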
//				Debug.Log ("P " + P.ToString ());
		
				IList P_data_json = (IList)P_json ["data"];
				float[] P_data = new float[P.rows () * P.cols ()];
				for (int i = 0; i < P_data_json.Count; i++) {
						P_data [i] = (float)(double)P_data_json [i];
				}
				P.put (0, 0, P_data);
//				Debug.Log ("P dump " + P.dump ());
		
		
		
		}
Developer: mosnyder, Project: facerace, Lines: 19, Source: PatchModel.cs

Example 6: convertToGrabCutValues

 private static void convertToGrabCutValues(Mat mask)
 {
     int width = mask.cols ();
     int height = mask.rows ();
     byte[] buffer = new byte[width * height];
     mask.get (0, 0, buffer);
     for (int x = 0; x < width; x++) {
         for (int y = 0; y < height; y++) {
             int value = buffer [y * width + x];
             if (value >= 0 && value < 64) {
                 buffer [y * width + x] = Imgproc.GC_BGD; // for sure background
             } else if (value >= 64 && value < 128) {
                 buffer [y * width + x] = Imgproc.GC_PR_BGD; // probably background
             } else if (value >= 128 && value < 192) {
                 buffer [y * width + x] = Imgproc.GC_PR_FGD; // probably foreground
             } else {
                 buffer [y * width + x] = Imgproc.GC_FGD; // for sure foreground
             }
         }
     }
     mask.put (0, 0, buffer);
 }
Developer: EnoxSoftware, Project: OpenCVForUnity, Lines: 22, Source: GrabCutSample.cs

Example 7: convertToGrayScaleValues

        private static void convertToGrayScaleValues(Mat mask)
        {
            int width = mask.cols ();
            int height = mask.rows ();
            byte[] buffer = new byte[width * height];
            mask.get (0, 0, buffer);
            for (int x = 0; x < width; x++) {
                for (int y = 0; y < height; y++) {
                    int value = buffer [y * width + x];

                    if (value == Imgproc.GC_BGD) {
                        buffer [y * width + x] = 0; // for sure background
                    } else if (value == Imgproc.GC_PR_BGD) {
                        buffer [y * width + x] = 85; // probably background
                    } else if (value == Imgproc.GC_PR_FGD) {
                        buffer [y * width + x] = (byte)170; // probably foreground
                    } else {
                        buffer [y * width + x] = (byte)255; // for sure foreground
                    }
                }
            }
            mask.put (0, 0, buffer);
        }
Developer: EnoxSoftware, Project: OpenCVForUnity, Lines: 23, Source: GrabCutSample.cs

Example 8: calc_peaks

		public Point[] calc_peaks (Mat im,
			           Point[] points,
			           OpenCVForUnity.Size ssize)
		{
				int n = points.Length;
//				Debug.Log ("n == int(patches.size()) " + patches.Count);
				using (Mat pt = (new MatOfPoint2f (points)).reshape (1, 2 * n))
				using (Mat S = calc_simil (pt))
				using (Mat Si = inv_simil (S)) {
						Point[] pts = apply_simil (Si, points);

						for (int i = 0; i < n; i++) {

								OpenCVForUnity.Size wsize = new OpenCVForUnity.Size (ssize.width + patches [i].patch_size ().width, ssize.height + patches [i].patch_size ().height);
								using (Mat A = new Mat (2, 3, CvType.CV_32F)) {
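										// A copies the 2x2 rotation/scale block of the similarity S, and its
										// translation column centers the wsize patch window on the current point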
										A.put (0, 0, S.get (0, 0) [0]);
										A.put (0, 1, S.get (0, 1) [0]);
										A.put (1, 0, S.get (1, 0) [0]);
										A.put (1, 1, S.get (1, 1) [0]);
										A.put (0, 2, pt.get (2 * i, 0) [0] - 
												(A.get (0, 0) [0] * (wsize.width - 1) / 2 + A.get (0, 1) [0] * (wsize.height - 1) / 2));
										A.put (1, 2, pt.get (2 * i + 1, 0) [0] - 
												(A.get (1, 0) [0] * (wsize.width - 1) / 2 + A.get (1, 1) [0] * (wsize.height - 1) / 2));
										using (Mat I = new Mat ()) {
												Imgproc.warpAffine (im, I, A, wsize, Imgproc.INTER_LINEAR + Imgproc.WARP_INVERSE_MAP);
												using (Mat R = patches [i].calc_response (I, false)) {
			
														Core.MinMaxLocResult minMaxLocResult = Core.minMaxLoc (R);
														pts [i].x = pts [i].x + minMaxLocResult.maxLoc.x - 0.5 * ssize.width;
														pts [i].y = pts [i].y + minMaxLocResult.maxLoc.y - 0.5 * ssize.height;
												}
										}
								}

						}

						return apply_simil (S, pts);
				}
		}
Developer: Thecontrarian, Project: unity-blink-detection, Lines: 39, Source: PatchModels.cs

Example 9: convertScreenPoint

        /// <summary>
        /// Converts the screen point.
        /// </summary>
        /// <returns>The screen point.</returns>
        /// <param name="screenPoint">Screen point.</param>
        /// <param name="quad">Quad.</param>
        /// <param name="cam">Cam.</param>
        static Point convertScreenPoint(Point screenPoint, GameObject quad, Camera cam)
        {
            Vector2 tl;
                        Vector2 tr;
                        Vector2 br;
                        Vector2 bl;

                        tl = cam.WorldToScreenPoint (new Vector3 (quad.transform.localPosition.x - quad.transform.localScale.x / 2, quad.transform.localPosition.y + quad.transform.localScale.y / 2, quad.transform.localPosition.z));
                        tr = cam.WorldToScreenPoint (new Vector3 (quad.transform.localPosition.x + quad.transform.localScale.x / 2, quad.transform.localPosition.y + quad.transform.localScale.y / 2, quad.transform.localPosition.z));
                        br = cam.WorldToScreenPoint (new Vector3 (quad.transform.localPosition.x + quad.transform.localScale.x / 2, quad.transform.localPosition.y - quad.transform.localScale.y / 2, quad.transform.localPosition.z));
                        bl = cam.WorldToScreenPoint (new Vector3 (quad.transform.localPosition.x - quad.transform.localScale.x / 2, quad.transform.localPosition.y - quad.transform.localScale.y / 2, quad.transform.localPosition.z));

                        Mat srcRectMat = new Mat (4, 1, CvType.CV_32FC2);
                        Mat dstRectMat = new Mat (4, 1, CvType.CV_32FC2);

                        srcRectMat.put (0, 0, tl.x, tl.y, tr.x, tr.y, br.x, br.y, bl.x, bl.y);
                        dstRectMat.put (0, 0, 0.0, 0.0, quad.transform.localScale.x, 0.0, quad.transform.localScale.x, quad.transform.localScale.y, 0.0, quad.transform.localScale.y);
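                        // src: the quad's corner points in screen space (tl, tr, br, bl);
                        // dst: the same corners expressed in the quad's local-scale rectangle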

                        Mat perspectiveTransform = Imgproc.getPerspectiveTransform (srcRectMat, dstRectMat);

                        //						Debug.Log ("srcRectMat " + srcRectMat.dump ());
                        //						Debug.Log ("dstRectMat " + dstRectMat.dump ());
                        //						Debug.Log ("perspectiveTransform " + perspectiveTransform.dump ());

                        MatOfPoint2f srcPointMat = new MatOfPoint2f (screenPoint);
                        MatOfPoint2f dstPointMat = new MatOfPoint2f ();

                        Core.perspectiveTransform (srcPointMat, dstPointMat, perspectiveTransform);

                        //						Debug.Log ("srcPointMat " + srcPointMat.dump ());
                        //						Debug.Log ("dstPointMat " + dstPointMat.dump ());

                        return dstPointMat.toArray () [0];
        }
Developer: ygx2011, Project: OpenCVForUnity, Lines: 41, Source: HandPoseEstimationSample.cs

Example 10: OnWebCamTextureToMatHelperInited

        /// <summary>
        /// Raises the web cam texture to mat helper inited event.
        /// </summary>
        public void OnWebCamTextureToMatHelperInited()
        {
            Debug.Log ("OnWebCamTextureToMatHelperInited");

            Mat webCamTextureMat = webCamTextureToMatHelper.GetMat ();

            texture = new Texture2D (webCamTextureMat.cols (), webCamTextureMat.rows (), TextureFormat.RGBA32, false);

            gameObject.GetComponent<Renderer> ().material.mainTexture = texture;

            gameObject.transform.localScale = new Vector3 (webCamTextureMat.cols (), webCamTextureMat.rows (), 1);
            Debug.Log ("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);

            float width = webCamTextureMat.width ();
            float height = webCamTextureMat.height ();

            float widthScale = (float)Screen.width / width;
            float heightScale = (float)Screen.height / height;
            if (widthScale < heightScale) {
                Camera.main.orthographicSize = (width * (float)Screen.height / (float)Screen.width) / 2;
            } else {
                Camera.main.orthographicSize = height / 2;
            }

            Mat downScaleMat = webCamTextureToMatHelper.GetDownScaleMat (webCamTextureMat);

            gray1Mat = new Mat (downScaleMat.rows (), downScaleMat.cols (), CvType.CV_8UC1);
            gray2Mat = new Mat (downScaleMat.rows (), downScaleMat.cols (), CvType.CV_8UC1);

            int ksize = 7;
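            // build kernelData as all ones with the center weight set to -(ksize*ksize - 1),
            // so the weights sum to zero: a Laplacian-like high-pass (edge) kernel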
            float[] kernelData = new float[ksize * ksize];
            for (int i = 0; i < kernelData.Length; i++) {
                if (i == kernelData.Length / 2) {
                    kernelData [i] = (-(kernelData.Length - 1));
                } else {
                    kernelData [i] = 1;
                }
            }
            kernel = new Mat (ksize, ksize, CvType.CV_32F);
            kernel.put (0, 0, kernelData);

            byteArray = new byte[downScaleMat.width () * downScaleMat.height ()];

            subdiv = new Subdiv2D ();
        }
Developer: EnoxSoftware, Project: OpenCVForUnity, Lines: 48, Source: PolygonFilterSample.cs

Example 11: read

	public void read (object root_json)
	{
		IDictionary detector_json = (IDictionary)root_json;

		detector_fname = (string)detector_json ["fname"];
//				Debug.Log ("detector_fname " + detector_fname);
				
		
		detector_offset = new Vector3 ((float)(double)detector_json ["x offset"], (float)(double)detector_json ["y offset"], (float)(double)detector_json ["z offset"]);
//				Debug.Log ("detector_offset " + detector_offset.ToString ());
		

		IDictionary reference_json = (IDictionary)detector_json ["reference"];

		
		reference = new Mat ((int)(long)reference_json ["rows"], (int)(long)reference_json ["cols"], CvType.CV_32F);
//				Debug.Log ("reference " + reference.ToString ());
		
		IList data_json = (IList)reference_json ["data"];
		float[] data = new float[reference.rows () * reference.cols ()];
		for (int i = 0; i < data_json.Count; i++) {
			data [i] = (float)(double)data_json [i];
		}
		reference.put (0, 0, data);
//				Debug.Log ("reference dump " + reference.dump ());
		
		detector = new CascadeClassifier (Utils.getFilePath (detector_fname));
		//				detector = new CascadeClassifier (System.IO.Path.Combine (Application.streamingAssetsPath, detector_fname));
	}
Developer: mosnyder, Project: facerace, Lines: 29, Source: FaceDetector.cs

Example 12: Start

				// Use this for initialization
				void Start ()
				{

						gameObject.transform.localScale = new Vector3 (imgTexture.width, imgTexture.height, 1);
						Camera.main.orthographicSize = imgTexture.height / 2;

		
						Mat imgMat = new Mat (imgTexture.height, imgTexture.width, CvType.CV_8UC4);
		
						Utils.texture2DToMat (imgTexture, imgMat);
						Debug.Log ("imgMat ToString " + imgMat.ToString ());

						//set cameraparam
						int max_d = Mathf.Max (imgMat.rows (), imgMat.cols ());
						Mat camMatrix = new Mat (3, 3, CvType.CV_64FC1);
						camMatrix.put (0, 0, max_d);
						camMatrix.put (0, 1, 0);
						camMatrix.put (0, 2, imgMat.cols () / 2.0f);
						camMatrix.put (1, 0, 0);
						camMatrix.put (1, 1, max_d);
						camMatrix.put (1, 2, imgMat.rows () / 2.0f);
						camMatrix.put (2, 0, 0);
						camMatrix.put (2, 1, 0);
						camMatrix.put (2, 2, 1.0f);
						Debug.Log ("camMatrix " + camMatrix.dump ());

						MatOfDouble distCoeffs = new MatOfDouble (0, 0, 0, 0);
						Debug.Log ("distCoeffs " + distCoeffs.dump ());


						//calibration camera
						Size imageSize = new Size (imgMat.cols (), imgMat.rows ());
						double apertureWidth = 0;
						double apertureHeight = 0;
						double[] fovx = new double[1];
						double[] fovy = new double[1];
						double[] focalLength = new double[1];
						Point principalPoint = new Point ();
						double[] aspectratio = new double[1];
		
						Calib3d.calibrationMatrixValues (camMatrix, imageSize, apertureWidth, apertureHeight, fovx, fovy, focalLength, principalPoint, aspectratio);
		
						Debug.Log ("imageSize " + imageSize.ToString ());
						Debug.Log ("apertureWidth " + apertureWidth);
						Debug.Log ("apertureHeight " + apertureHeight);
						Debug.Log ("fovx " + fovx [0]);
						Debug.Log ("fovy " + fovy [0]);
						Debug.Log ("focalLength " + focalLength [0]);
						Debug.Log ("principalPoint " + principalPoint.ToString ());
						Debug.Log ("aspectratio " + aspectratio [0]);

						//Adjust Unity Camera FOV
						ARCamera.fieldOfView = (float)fovy [0];

//			ARCamera.projectionMatrix = ARCamera.projectionMatrix * Matrix4x4.Scale(new Vector3(-1, -1, 1));
//			gameObject.transform.localScale = new Vector3 (-1 * gameObject.transform.localScale.x, -1 * gameObject.transform.localScale.y, 1);


		 
						MarkerDetector markerDetector = new MarkerDetector (camMatrix, distCoeffs, markerDesign);

						markerDetector.processFrame (imgMat, 1);


						//Marker Coordinate Initial Matrix
						Matrix4x4 lookAtM = getLookAtMatrix (new Vector3 (0, 0, 0), new Vector3 (0, 0, 1), new Vector3 (0, -1, 0));
						Debug.Log ("lookAt " + lookAtM.ToString ());

						//Marker to Camera Coordinate System Convert Matrix
						if (markerDetector.getTransformations ().Count > 0) {
								Matrix4x4 transformationM = markerDetector.getTransformations () [0];
								Debug.Log ("transformationM " + transformationM.ToString ());

								//OpenGL to Unity Coordinate System Convert Matrix
								//see http://docs.unity3d.com/ScriptReference/Camera-worldToCameraMatrix.html :
								//camera space matches OpenGL convention (the camera's forward is the negative Z axis),
								//unlike Unity's convention, where forward is the positive Z axis
								Matrix4x4 invertZM = Matrix4x4.TRS (Vector3.zero, Quaternion.identity, new Vector3 (1, 1, -1));
								Debug.Log ("invertZM " + invertZM.ToString ());

								Matrix4x4 worldToCameraM = lookAtM * transformationM * invertZM;
								Debug.Log ("worldToCameraM " + worldToCameraM.ToString ());

								ARCamera.worldToCameraMatrix = worldToCameraM;
		
						} else {
								Debug.LogWarning ("Marker is not detected");
						}



						Texture2D texture = new Texture2D (imgMat.cols (), imgMat.rows (), TextureFormat.RGBA32, false);
		
						Utils.matToTexture2D (imgMat, texture);
		
						gameObject.GetComponent<Renderer> ().material.mainTexture = texture;
				}
Developer: wlstks7, Project: MarkerBasedARSample, Lines: 96, Source: Texture2DMarkerBasedARSample.cs
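
The nine element-wise camMatrix.put calls above (and their copies in Examples 13 and 14) can be collapsed into a single call, because Mat.put also accepts a params double[] laid out row-major, as Examples 2 and 9 already rely on. A hypothetical equivalent sketch, reusing the variables from this example:

		//set cameraparam in one bulk put (row-major)
		Mat camMatrix = new Mat (3, 3, CvType.CV_64FC1);
		camMatrix.put (0, 0,
				max_d, 0, imgMat.cols () / 2.0,
				0, max_d, imgMat.rows () / 2.0,
				0, 0, 1.0);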

Example 13: init

        private IEnumerator init()
        {
            axes.SetActive (false);
                        head.SetActive (false);
                        rightEye.SetActive (false);
                        leftEye.SetActive (false);
                        mouth.SetActive (false);

                        if (webCamTexture != null) {
                                faceTracker.reset ();

                                webCamTexture.Stop ();
                                initDone = false;

                                rgbaMat.Dispose ();
                                grayMat.Dispose ();
                                cascade.Dispose ();
                                camMatrix.Dispose ();
                                distCoeffs.Dispose ();

                        }

                        // Checks how many and which cameras are available on the device
                        for (int cameraIndex = 0; cameraIndex < WebCamTexture.devices.Length; cameraIndex++) {

                                if (WebCamTexture.devices [cameraIndex].isFrontFacing == isFrontFacing) {

                                        Debug.Log (cameraIndex + " name " + WebCamTexture.devices [cameraIndex].name + " isFrontFacing " + WebCamTexture.devices [cameraIndex].isFrontFacing);

                                        webCamDevice = WebCamTexture.devices [cameraIndex];

                                        webCamTexture = new WebCamTexture (webCamDevice.name, width, height);

                                        break;
                                }
                        }

                        if (webCamTexture == null) {
                                webCamDevice = WebCamTexture.devices [0];
                                webCamTexture = new WebCamTexture (webCamDevice.name, width, height);
                        }

                        Debug.Log ("width " + webCamTexture.width + " height " + webCamTexture.height + " fps " + webCamTexture.requestedFPS);

                        // Starts the camera
                        webCamTexture.Play ();

                        while (true) {
                                //If you want to use webcamTexture.width and webcamTexture.height on iOS, you have to wait until webcamTexture.didUpdateThisFrame == 1, otherwise these two values will be equal to 16. (http://forum.unity3d.com/threads/webcamtexture-and-error-0x0502.123922/)
                                #if UNITY_IOS && !UNITY_EDITOR && (UNITY_4_6_3 || UNITY_4_6_4 || UNITY_5_0_0 || UNITY_5_0_1)
                                if (webCamTexture.width > 16 && webCamTexture.height > 16) {
                                #else
                                if (webCamTexture.didUpdateThisFrame) {
                                        #endif
                                        Debug.Log ("width " + webCamTexture.width + " height " + webCamTexture.height + " fps " + webCamTexture.requestedFPS);
                                        Debug.Log ("videoRotationAngle " + webCamTexture.videoRotationAngle + " videoVerticallyMirrored " + webCamTexture.videoVerticallyMirrored + " isFrontFacing " + webCamDevice.isFrontFacing);

                                        colors = new Color32[webCamTexture.width * webCamTexture.height];

                                        rgbaMat = new Mat (webCamTexture.height, webCamTexture.width, CvType.CV_8UC4);
                                        grayMat = new Mat (webCamTexture.height, webCamTexture.width, CvType.CV_8UC1);

                                        texture = new Texture2D (webCamTexture.width, webCamTexture.height, TextureFormat.RGBA32, false);

                                        cascade = new CascadeClassifier (Utils.getFilePath ("haarcascade_frontalface_alt.xml"));
                                        if (cascade.empty ()) {
                                                Debug.LogError ("cascade file is not loaded. Please copy from “FaceTrackerSample/StreamingAssets/” to “Assets/StreamingAssets/” folder.");
                                        }

                                        gameObject.transform.localScale = new Vector3 (webCamTexture.width, webCamTexture.height, 1);

                                        gameObject.transform.localEulerAngles = new Vector3 (0, 0, 0);
            //										gameObject.transform.rotation = gameObject.transform.rotation * Quaternion.AngleAxis (webCamTexture.videoRotationAngle, Vector3.back);

            //										bool _videoVerticallyMirrored = webCamTexture.videoVerticallyMirrored;
            //										float scaleX = 1;
            //										float scaleY = _videoVerticallyMirrored ? -1.0f : 1.0f;
            //										gameObject.transform.localScale = new Vector3 (scaleX * gameObject.transform.localScale.x, scaleY * gameObject.transform.localScale.y, 1);

                                        gameObject.GetComponent<Renderer> ().material.mainTexture = texture;

                                        Camera.main.orthographicSize = webCamTexture.height / 2;

                                        int max_d = Mathf.Max (rgbaMat.rows (), rgbaMat.cols ());
                                        camMatrix = new Mat (3, 3, CvType.CV_64FC1);
                                        camMatrix.put (0, 0, max_d);
                                        camMatrix.put (0, 1, 0);
                                        camMatrix.put (0, 2, rgbaMat.cols () / 2.0f);
                                        camMatrix.put (1, 0, 0);
                                        camMatrix.put (1, 1, max_d);
                                        camMatrix.put (1, 2, rgbaMat.rows () / 2.0f);
                                        camMatrix.put (2, 0, 0);
                                        camMatrix.put (2, 1, 0);
                                        camMatrix.put (2, 2, 1.0f);

                                        Size imageSize = new Size (rgbaMat.cols (), rgbaMat.rows ());
                                        double apertureWidth = 0;
                                        double apertureHeight = 0;
                                        double[] fovx = new double[1];
                                        double[] fovy = new double[1];
//......... part of the code omitted here .........
Developer: behiever, Project: FaceTrackerSample, Lines: 101, Source: FaceTrackerARSample.cs

Example 14: OnWebCamTextureToMatHelperInited

				/// <summary>
				/// Raises the web cam texture to mat helper inited event.
				/// </summary>
				public void OnWebCamTextureToMatHelperInited ()
				{
						Debug.Log ("OnWebCamTextureToMatHelperInited");
			
						Mat webCamTextureMat = webCamTextureToMatHelper.GetMat ();
			
						colors = new Color32[webCamTextureMat.cols () * webCamTextureMat.rows ()];
						texture = new Texture2D (webCamTextureMat.cols (), webCamTextureMat.rows (), TextureFormat.RGBA32, false);



						gameObject.transform.localScale = new Vector3 (webCamTextureMat.cols (), webCamTextureMat.rows (), 1);
			
						Debug.Log ("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);
			
						float width = 0;
						float height = 0;
			
						width = gameObject.transform.localScale.x;
						height = gameObject.transform.localScale.y;

						float imageScale = 1.0f;
						float widthScale = (float)Screen.width / width;
						float heightScale = (float)Screen.height / height;
						if (widthScale < heightScale) {
								Camera.main.orthographicSize = (width * (float)Screen.height / (float)Screen.width) / 2;
								imageScale = (float)Screen.height / (float)Screen.width;
						} else {
								Camera.main.orthographicSize = height / 2;
						}
			
						gameObject.GetComponent<Renderer> ().material.mainTexture = texture;




						grayMat = new Mat (webCamTextureMat.rows (), webCamTextureMat.cols (), CvType.CV_8UC1);
									
						cascade = new CascadeClassifier (Utils.getFilePath ("haarcascade_frontalface_alt.xml"));
						if (cascade.empty ()) {
								Debug.LogError ("cascade file is not loaded. Please copy from “FaceTrackerSample/StreamingAssets/” to “Assets/StreamingAssets/” folder.");
						}
									
									
						int max_d = Mathf.Max (webCamTextureMat.rows (), webCamTextureMat.cols ());
						camMatrix = new Mat (3, 3, CvType.CV_64FC1);
						camMatrix.put (0, 0, max_d);
						camMatrix.put (0, 1, 0);
						camMatrix.put (0, 2, webCamTextureMat.cols () / 2.0f);
						camMatrix.put (1, 0, 0);
						camMatrix.put (1, 1, max_d);
						camMatrix.put (1, 2, webCamTextureMat.rows () / 2.0f);
						camMatrix.put (2, 0, 0);
						camMatrix.put (2, 1, 0);
						camMatrix.put (2, 2, 1.0f);
									
						Size imageSize = new Size (webCamTextureMat.cols () * imageScale, webCamTextureMat.rows () * imageScale);
						double apertureWidth = 0;
						double apertureHeight = 0;
						double[] fovx = new double[1];
						double[] fovy = new double[1];
						double[] focalLength = new double[1];
						Point principalPoint = new Point ();
						double[] aspectratio = new double[1];
									
									
									
									
						Calib3d.calibrationMatrixValues (camMatrix, imageSize, apertureWidth, apertureHeight, fovx, fovy, focalLength, principalPoint, aspectratio);
									
						Debug.Log ("imageSize " + imageSize.ToString ());
						Debug.Log ("apertureWidth " + apertureWidth);
						Debug.Log ("apertureHeight " + apertureHeight);
						Debug.Log ("fovx " + fovx [0]);
						Debug.Log ("fovy " + fovy [0]);
						Debug.Log ("focalLength " + focalLength [0]);
						Debug.Log ("principalPoint " + principalPoint.ToString ());
						Debug.Log ("aspectratio " + aspectratio [0]);
									
									
						if (Screen.height > Screen.width) {
								ARCamera.fieldOfView = (float)fovx [0];
						} else {
								ARCamera.fieldOfView = (float)fovy [0];
						}

									
						Debug.Log ("camMatrix " + camMatrix.dump ());
									
									
						distCoeffs = new MatOfDouble (0, 0, 0, 0);
						Debug.Log ("distCoeffs " + distCoeffs.dump ());
									
									
									
						lookAtM = getLookAtMatrix (new Vector3 (0, 0, 0), new Vector3 (0, 0, 1), new Vector3 (0, -1, 0));
						Debug.Log ("lookAt " + lookAtM.ToString ());
//......... part of the code omitted here .........
Developer: mosnyder, Project: facerace, Lines: 101, Source: FaceTrackerARSample.cs

Example 15: inv_simil

		Mat inv_simil (Mat S)
		{
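				// S is a 2x3 similarity transform [R | t]; its inverse is [R^-1 | -R^-1 t],
				// where R^-1 is built element-wise from the 2x2 determinant d below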
				Mat Si = new Mat (2, 3, CvType.CV_32F);
				float d = (float)S.get (0, 0) [0] * (float)S.get (1, 1) [0] - (float)S.get (1, 0) [0] * (float)S.get (0, 1) [0];

				Si.put (0, 0, S.get (1, 1) [0] / d);
				Si.put (0, 1, -S.get (0, 1) [0] / d);
				Si.put (1, 1, S.get (0, 0) [0] / d);
				Si.put (1, 0, -S.get (1, 0) [0] / d);

				Mat Ri = new Mat (Si, new OpenCVForUnity.Rect (0, 0, 2, 2));


				Mat negaRi = new Mat ();
				Core.multiply (Ri, new Scalar (-1), negaRi);
				Mat t = new Mat ();
				Core.gemm (negaRi, S.col (2), 1, new Mat (negaRi.rows (), negaRi.cols (), negaRi.type ()), 0, t);

				Mat St = Si.col (2);
				t.copyTo (St);

				return Si;
		}
Developer: Thecontrarian, Project: unity-blink-detection, Lines: 23, Source: PatchModels.cs


Note: The Mat.put method examples in this article were compiled from GitHub, MSDocs and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their authors; copyright remains with the original authors, and redistribution and use should follow the corresponding project's license. Please do not reproduce without permission.