

C++ cv::Mat Code Examples

This article collects typical usage examples of cv::Mat in C++. If you are wondering how exactly cv::Mat is used, or are looking for real-world examples, the hand-picked code samples below may help. You can also explore further usage examples from the enclosing cv namespace.


The following presents 15 code examples of cv::Mat, ordered by popularity by default.
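
Before diving into the examples, here is a minimal, self-contained sketch of the most common ways to construct a cv::Mat. It is a standalone illustration for orientation, not taken from any of the projects below:

#include <opencv2/core.hpp>

int main()
{
	// Allocate a 3-row, 4-column matrix of 8-bit, 3-channel pixels, filled with a constant.
	cv::Mat a(3, 4, CV_8UC3, cv::Scalar(0, 0, 255));

	// Convenience factories for zero and identity matrices.
	cv::Mat z = cv::Mat::zeros(3, 3, CV_32FC1);
	cv::Mat e = cv::Mat::eye(3, 3, CV_64FC1);

	// Wrap existing memory without copying (the Mat does not own the buffer).
	float buffer[6] = {1, 2, 3, 4, 5, 6};
	cv::Mat wrapped(2, 3, CV_32FC1, buffer);

	// Force a deep copy when the Mat must outlive the buffer.
	cv::Mat owned = wrapped.clone();
	return 0;
}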

Example 1: ifs

bool DatasetCIFAR10::loadBatch(std::string filename)
{
	std::ifstream ifs(filename, std::ios::binary); // CIFAR-10 batch files are binary

	if (not ifs.good()) {
		std::cerr << "(loadImages) ERROR cannot open file: " << filename << std::endl;
		return false;
	}

	while (not ifs.eof()) {
		char lbl = 0;
		ifs.read(&lbl, 1);
		char buffer[imgSize * 3];
		ifs.read(buffer, imgSize * 3);
		if (ifs.eof())
			break;
		// wrap the three colour planes, merge them, convert to double and normalize to [0, 1]
		cv::Size size(imgCols, imgRows); // cv::Size takes (width, height)
		std::vector<Mat> mats({Mat(size, CV_8UC1, buffer, Mat::AUTO_STEP),
		                       Mat(size, CV_8UC1, buffer + imgSize, Mat::AUTO_STEP),
		                       Mat(size, CV_8UC1, buffer + 2 * imgSize, Mat::AUTO_STEP)});
		Mat img(size, CV_8UC3);
		cv::merge(mats, img);
		img.convertTo(img, CV_64FC3);
		img = img / 255.0;
		img = img.reshape(1, imgRows);

		labels.push_back(lbl);
		images.push_back(img);
	}
	size = images.size();
	return true;
}
Developer: hnnsngl, Project: learning-sandbox, Lines: 33, Source: loadCIFAR10.hpp
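
The dataset members referenced above (imgRows, imgCols, imgSize, images, labels, size) are declared elsewhere in the project. Purely as an illustration of what the example assumes, a plausible minimal declaration consistent with the CIFAR-10 binary layout (one label byte followed by three 1024-byte colour planes per record) could look like the following; this is a hypothetical sketch, not the project's actual header:

#include <opencv2/core.hpp>
#include <cstddef>
#include <string>
#include <vector>

struct DatasetCIFAR10
{
	// CIFAR-10 stores 32x32 RGB images as three consecutive 1024-byte planes per record.
	static const int imgRows = 32;
	static const int imgCols = 32;
	static const int imgSize = imgRows * imgCols;

	std::vector<cv::Mat> images; // one CV_64F matrix per image, values in [0, 1]
	std::vector<int> labels;     // class index 0..9 per image
	std::size_t size = 0;        // number of loaded samples

	bool loadBatch(std::string filename); // defined as in the example above
};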

Example 2: asin

TEST(SupervisedDescentOptimiser, SinConvergence) {
	// sin(x):
	auto h = [](Mat value, size_t, int) { return std::sin(value.at<float>(0)); };
	auto h_inv = [](float value) {
		if (value >= 1.0f) // our upper border of y is 1.0f, but it can be a bit larger due to floating point representation. asin then returns NaN.
			return std::asin(1.0f);
		else
			return std::asin(value);
		};

	float startInterval = -1.0f; float stepSize = 0.2f; int numValues = 11; Mat y_tr(numValues, 1, CV_32FC1); // sin: [-1:0.2:1]
	{
		vector<float> values(numValues);
		strided_iota(std::begin(values), std::next(std::begin(values), numValues), startInterval, stepSize);
		y_tr = Mat(values, true);
	}
	Mat x_tr(numValues, 1, CV_32FC1); // Will be the inverse of y_tr
	{
		vector<float> values(numValues);
		std::transform(y_tr.begin<float>(), y_tr.end<float>(), begin(values), h_inv);
		x_tr = Mat(values, true);
	}

	Mat x0 = 0.5f * Mat::ones(numValues, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	SupervisedDescentOptimiser<LinearRegressor<>> sdo({ LinearRegressor<>() });
	
	// Test the callback mechanism as well: (better move to a separate unit test?)
	auto checkResidual = [&](const Mat& currentX) {
		double residual = cv::norm(currentX, x_tr, cv::NORM_L2) / cv::norm(x_tr, cv::NORM_L2);
		EXPECT_DOUBLE_EQ(0.21369851877468238, residual);
	};

	sdo.train(x_tr, x0, y_tr, h, checkResidual);

	// Make sure the training converges, i.e. the residual is correct on the training data:
	Mat predictions = sdo.test(x0, y_tr, h);
	double trainingResidual = normalisedLeastSquaresResidual(predictions, x_tr);
	EXPECT_DOUBLE_EQ(0.21369851877468238, trainingResidual);

	// Test the trained model:
	// Test data with finer resolution:
	float startIntervalTest = -1.0f; float stepSizeTest = 0.05f; int numValuesTest = 41; Mat y_ts(numValuesTest, 1, CV_32FC1); // sin: [-1:0.05:1]
	{
		vector<float> values(numValuesTest);
		strided_iota(std::begin(values), std::next(std::begin(values), numValuesTest), startIntervalTest, stepSizeTest);
		y_ts = Mat(values, true);
	}
	Mat x_ts_gt(numValuesTest, 1, CV_32FC1); // Will be the inverse of y_ts
	{
		vector<float> values(numValuesTest);
		std::transform(y_ts.begin<float>(), y_ts.end<float>(), begin(values), h_inv);
		x_ts_gt = Mat(values, true);
	}
	Mat x0_ts = 0.5f * Mat::ones(numValuesTest, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.
	
	predictions = sdo.test(x0_ts, y_ts, h);
	double testResidual = normalisedLeastSquaresResidual(predictions, x_ts_gt);
	ASSERT_NEAR(0.1800101229, testResidual, 0.0000000003);
}
Developer: 23119841, Project: superviseddescent, Lines: 60, Source: test_SupervisedDescentOptimiser.cpp
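
Examples 2 and 3 also call a strided_iota helper that is not shown in the snippets. A minimal sketch of such a helper, assuming it simply fills a range with evenly spaced values (the project's real implementation may differ), is:

// Fill [first, last) with start, start + stride, start + 2*stride, ...
template <typename ForwardIt, typename T>
void strided_iota(ForwardIt first, ForwardIt last, T start, T stride)
{
	for (; first != last; ++first) {
		*first = start;
		start += stride;
	}
}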

Example 3: x_tr

TEST(SupervisedDescentOptimiser, XCubeConvergenceCascade) {
	// x^3:
	auto h = [](Mat value, size_t, int) { return static_cast<float>(std::pow(value.at<float>(0), 3)); };
	auto h_inv = [](float value) { return std::cbrt(value); }; // cubic root

	float startInterval = -27.0f; float stepSize = 3.0f; int numValues = 19; Mat y_tr(numValues, 1, CV_32FC1); // cube: [-27:3:27]
	{
		vector<float> values(numValues);
		strided_iota(std::begin(values), std::next(std::begin(values), numValues), startInterval, stepSize);
		y_tr = Mat(values, true);
	}
	Mat x_tr(numValues, 1, CV_32FC1); // Will be the inverse of y_tr
	{
		vector<float> values(numValues);
		std::transform(y_tr.begin<float>(), y_tr.end<float>(), begin(values), h_inv);
		x_tr = Mat(values, true);
	}

	Mat x0 = 0.5f * Mat::ones(numValues, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	vector<LinearRegressor<>> regressors(10);
	SupervisedDescentOptimiser<LinearRegressor<>> sdo(regressors);
	sdo.train(x_tr, x0, y_tr, h);

	// Make sure the training converges, i.e. the residual is correct on the training data:
	Mat predictions = sdo.test(x0, y_tr, h);
	double trainingResidual = normalisedLeastSquaresResidual(predictions, x_tr);
	EXPECT_NEAR(0.04312725, trainingResidual, 0.00000002);

	// Test the trained model:
	// Test data with finer resolution:
	float startIntervalTest = -27.0f; float stepSizeTest = 0.5f; int numValuesTest = 109; Mat y_ts(numValuesTest, 1, CV_32FC1); // cube: [-27:0.5:27]
	{
		vector<float> values(numValuesTest);
		strided_iota(std::begin(values), std::next(std::begin(values), numValuesTest), startIntervalTest, stepSizeTest);
		y_ts = Mat(values, true);
	}
	Mat x_ts_gt(numValuesTest, 1, CV_32FC1); // Will be the inverse of y_ts
	{
		vector<float> values(numValuesTest);
		std::transform(y_ts.begin<float>(), y_ts.end<float>(), begin(values), h_inv);
		x_ts_gt = Mat(values, true);
	}
	Mat x0_ts = 0.5f * Mat::ones(numValuesTest, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	predictions = sdo.test(x0_ts, y_ts, h);
	double testResidual = normalisedLeastSquaresResidual(predictions, x_ts_gt);
	ASSERT_NEAR(0.05889855, testResidual, 0.00000002);
}
Developer: 23119841, Project: superviseddescent, Lines: 49, Source: test_SupervisedDescentOptimiser.cpp

Example 4: pixelToWorld

Point3d Camera::pixelToWorld(const Point2d& pixel) const
{
	double im[2] = {pixel.x, pixel.y};
	double wl[3];
	double z;

	// setup intrinsic and extrinsic parameters
	CvMat img_frm = cvMat(1, 1, CV_64FC2, im);
	Mat world_frm = Mat(T_ROWS, T_COLS, TYPE, wl);

	// convert from distorted pixels to normalized camera frame
	// cv:: version does not allow doubles for some odd reason,
	// so use the C version
	CvMat A = _A;
	CvMat k = _k;
	cvUndistortPoints(&img_frm, &img_frm, &A, &k);
	
	// convert from camera frame to world frame (plane Z = 0)
	/* Old, incorrect version -- z != t3; actually z = r31*X + r32*Y + t3:
	z = _t(2, 0) ? _t(2, 0) : 1;
	wl[0] = z*im[0] - _t(0, 0);
	wl[1] = z*im[1] - _t(1, 0);
	wl[2] = 0;
	world_frm = _R.t() * world_frm;
	*/

	wl[0] = (_R(1,1)*_t(0,0) - _R(0,1)*_t(1,0) + _R(2,1)*_t(1,0)*im[0] - _R(1,1)*_t(2,0)*im[0]
	         - _R(2,1)*_t(0,0)*im[1] + _R(0,1)*_t(2,0)*im[1])
	      / (_R(0,1)*_R(1,0) - _R(0,0)*_R(1,1) + _R(1,1)*_R(2,0)*im[0] - _R(1,0)*_R(2,1)*im[0]
	         - _R(0,1)*_R(2,0)*im[1] + _R(0,0)*_R(2,1)*im[1]);
	wl[1] = (-_R(0,0)*_t(1,0) + _R(2,0)*_t(1,0)*im[0] + _R(1,0)*(_t(0,0) - _t(2,0)*im[0])
	         - _R(2,0)*_t(0,0)*im[1] + _R(0,0)*_t(2,0)*im[1])
	      / (-_R(0,1)*_R(1,0) + _R(0,0)*_R(1,1) - _R(1,1)*_R(2,0)*im[0] + _R(1,0)*_R(2,1)*im[0]
	         + _R(0,1)*_R(2,0)*im[1] - _R(0,0)*_R(2,1)*im[1]);
	wl[2] = 0;

	return Point3d(wl[0], wl[1], wl[2]);
}
Developer: JoshMarino, Project: lims-hsv-system, Lines: 31, Source: Camera.cpp
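
The closed-form expressions for wl[0] and wl[1] above come from intersecting the viewing ray with the plane Z = 0: after cvUndistortPoints, (im[0], im[1]) are normalized camera coordinates, so s*[u, v, 1]^T = R*[X, Y, 0]^T + t, and eliminating the scale s leaves a 2x2 linear system in X and Y. The same computation can be written more transparently with cv::Mat algebra; the sketch below illustrates that derivation (function name and layout are hypothetical, not from the project):

#include <opencv2/core.hpp>

// Back-project a normalized, undistorted image point (u, v) onto the plane Z = 0.
// R (3x3) and t (3x1) are the camera extrinsics, both CV_64F.
cv::Point3d backprojectToGroundPlane(double u, double v, const cv::Mat& R, const cv::Mat& t)
{
	// From s*[u v 1]^T = R*[X Y 0]^T + t, eliminating s yields:
	//   (r11 - u*r31)*X + (r12 - u*r32)*Y = u*t3 - t1
	//   (r21 - v*r31)*X + (r22 - v*r32)*Y = v*t3 - t2
	cv::Mat A = (cv::Mat_<double>(2, 2) <<
		R.at<double>(0,0) - u * R.at<double>(2,0), R.at<double>(0,1) - u * R.at<double>(2,1),
		R.at<double>(1,0) - v * R.at<double>(2,0), R.at<double>(1,1) - v * R.at<double>(2,1));
	cv::Mat b = (cv::Mat_<double>(2, 1) <<
		u * t.at<double>(2) - t.at<double>(0),
		v * t.at<double>(2) - t.at<double>(1));

	cv::Mat XY;
	cv::solve(A, b, XY); // exact solution of the 2x2 system
	return cv::Point3d(XY.at<double>(0), XY.at<double>(1), 0.0);
}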

Example 5: getOpenCVMat

Mat* MyImage::getOpenCVMat(){
  if(mat == NULL)  {
    try {
      Mat orig;
      getMagickImage();
      magick.magick("BGR");
      Blob blb;
      magick.write(&blb);
      mat = new Mat();
      orig = Mat(magick.size().height(),
  	       magick.size().width(), 
  	       CV_8UC3, 
  	       (void *) blb.data());
      cvtColor(orig, *mat, CV_RGB2HSV);
      magick.magick("RGB");
    }
    catch (Magick::Exception &e)
    {
      cout << "Caught a magick exception: " << e.what() << endl;
      throw e;
    }
  }
  return mat;

}
Developer: varungupta181186, Project: vistool, Lines: 25, Source: myImage.cpp

Example 6: fit_shape_model

/**
 * Convenience function that fits the shape model and expression blendshapes to
 * landmarks. Makes the fitted PCA shape and blendshape coefficients accessible
 * via the out parameters \p pca_shape_coefficients and \p blendshape_coefficients.
 * It iterates PCA-shape and blendshape fitting until convergence
 * (usually it converges within 5 to 10 iterations).
 *
 * See fit_shape_model(cv::Mat, eos::morphablemodel::MorphableModel, std::vector<eos::morphablemodel::Blendshape>, std::vector<cv::Vec2f>, std::vector<int>, float lambda)
 * for a simpler overload that just returns the shape instance.
 *
 * @param[in] affine_camera_matrix The estimated pose as a 3x4 affine camera matrix that is used to fit the shape.
 * @param[in] morphable_model The 3D Morphable Model used for the shape fitting.
 * @param[in] blendshapes A vector of blendshapes that are being fit to the landmarks in addition to the PCA model.
 * @param[in] image_points 2D landmarks from an image to fit the model to.
 * @param[in] vertex_indices The vertex indices in the model that correspond to the 2D points.
 * @param[in] lambda Regularisation parameter of the PCA shape fitting.
 * @param[out] pca_shape_coefficients Output parameter that will contain the resulting pca shape coefficients.
 * @param[out] blendshape_coefficients Output parameter that will contain the resulting blendshape coefficients.
 * @return The fitted model shape instance.
 */
cv::Mat fit_shape_model(cv::Mat affine_camera_matrix, eos::morphablemodel::MorphableModel morphable_model, std::vector<eos::morphablemodel::Blendshape> blendshapes, std::vector<cv::Vec2f> image_points, std::vector<int> vertex_indices, float lambda, std::vector<float>& pca_shape_coefficients, std::vector<float>& blendshape_coefficients)
{
	using cv::Mat;
	
	Mat blendshapes_as_basis(blendshapes[0].deformation.rows, blendshapes.size(), CV_32FC1); // assert blendshapes.size() > 0 and all of them have same number of rows, and 1 col
	for (int i = 0; i < blendshapes.size(); ++i)
	{
		blendshapes[i].deformation.copyTo(blendshapes_as_basis.col(i));
	}

	std::vector<float> last_blendshape_coeffs, current_blendshape_coeffs; 
	std::vector<float> last_pca_coeffs, current_pca_coeffs;
	current_blendshape_coeffs.resize(blendshapes.size()); // starting values t_0, all zeros
	current_pca_coeffs.resize(morphable_model.get_shape_model().get_num_principal_components()); // starting values, all zeros
	Mat combined_shape;

	do // run at least once:
	{
		last_blendshape_coeffs = current_blendshape_coeffs;
		last_pca_coeffs = current_pca_coeffs;
		// Estimate the PCA shape coefficients with the current blendshape coefficients (0 in the first iteration):
		Mat mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() + blendshapes_as_basis * Mat(last_blendshape_coeffs);
		current_pca_coeffs = fitting::fit_shape_to_landmarks_linear(morphable_model, affine_camera_matrix, image_points, vertex_indices, mean_plus_blendshapes, lambda);

		// Estimate the blendshape coefficients with the current PCA model estimate:
		Mat pca_model_shape = morphable_model.get_shape_model().draw_sample(current_pca_coeffs);
		current_blendshape_coeffs = eos::fitting::fit_blendshapes_to_landmarks_linear(blendshapes, pca_model_shape, affine_camera_matrix, image_points, vertex_indices, 0.0f);

		combined_shape = pca_model_shape + blendshapes_as_basis * Mat(current_blendshape_coeffs);
	} while (std::abs(cv::norm(current_pca_coeffs) - cv::norm(last_pca_coeffs)) >= 0.01 || std::abs(cv::norm(current_blendshape_coeffs) - cv::norm(last_blendshape_coeffs)) >= 0.01);
	
	pca_shape_coefficients = current_pca_coeffs;
	blendshape_coefficients = current_blendshape_coeffs;
	return combined_shape;
};
Developer: 382309009, Project: eos, Lines: 55, Source: fitting.hpp

Example 7: montageList

cv::Mat montageList(const Dataset &dataset, const std::vector<std::pair<int, int>> &list,
                    int tiles_per_row = 30)
{
	using cv::Mat;

	const int count = list.size();
	const int xMargin = 2;
	const int yMargin = 14;

	const int width = dataset.imgCols + xMargin;
	const int height = dataset.imgRows + yMargin;

	assert(dataset.images.size() > 0);
	const int type = dataset.images[0].type();

	if (list.size() == 0)
		return Mat(0, 0, type);

	Mat mat = Mat::ones((count / tiles_per_row + 1) * height, tiles_per_row * width, type);

	for (size_t i = 0; i < list.size(); i++) {
		int x = (i % tiles_per_row) * width;
		int y = (i / tiles_per_row) * height;
		int id = list[i].first;
		dataset.images[id].copyTo(mat(cv::Rect(x, y, dataset.imgCols, dataset.imgRows)));

		std::string label =
		    std::to_string(list[i].second) + "/" + std::to_string(dataset.labels[id]);

		cv::putText(mat, label, cv::Point(x, y + height - 2), cv::FONT_HERSHEY_SIMPLEX, 0.4,
		            cv::Scalar({0.0, 0.0, 0.0, 0.0}));
	}
	return mat;
}
Developer: hnnsngl, Project: learning-sandbox, Lines: 34, Source: montagelist.hpp

Example 8:

void CaffeClassifier::Impl::FillBlob(const vector<Mat>& images,
                                     Blobf* blob)
{
    // Check that net is configured to use a proper batch size.
    CV_Assert(static_cast<size_t>(data_blob->shape(0)) == images.size());
    float* blob_data = blob->mutable_cpu_data();
    for (size_t i = 0; i < images.size(); ++i)
    {
        Mat image = images[i];
        // Check that all other dimensions of the blob and the image match.
        CV_Assert(blob->shape(1) == image.channels());
        CV_Assert(blob->shape(2) == image.rows);
        CV_Assert(blob->shape(3) == image.cols);

        Mat image_float = image;
        if (image.type() != CV_32F) {
            image.convertTo(image_float, CV_32F);
        }

        vector<Mat> image_channels;
        for (int j = 0; j < image.channels(); ++j)
        {
            image_channels.push_back(Mat(image.size(), CV_32F,
                                         blob_data + blob->offset(i, j)));
        }
        cv::split(image_float, image_channels);
    }
}
Developer: ekaterinaMaljutina, Project: DNN_based_detection, Lines: 28, Source: caffe_classifier.cpp

Example 9: LOGD

/*
 * Class:     pt_chambino_p_pulse_Pulse_Face
 * Method:    _box
 * Signature: (JJ)V
 */
JNIEXPORT void JNICALL Java_pt_chambino_p_pulse_Pulse_00024Face__1box
  (JNIEnv *jenv, jclass, jlong self, jlong mat)
  {
    LOGD("Java_pt_chambino_p_pulse_Pulse_00024Face__1box enter");
    try
    {
        if (self) {
            vector<Rect> v;
            v.push_back(((Pulse::Face*)self)->evm.box);
            *((Mat*)mat) = Mat(v, true);
        }
    }
    catch(cv::Exception& e)
    {
        jclass je = jenv->FindClass("org/opencv/core/CvException");
        if(!je) je = jenv->FindClass("java/lang/Exception");
        jenv->ThrowNew(je, e.what());
    }
    catch (...)
    {
        jclass je = jenv->FindClass("java/lang/Exception");
        jenv->ThrowNew(je, "Unknown exception in JNI code.");
    }
    LOGD("Java_pt_chambino_p_pulse_Pulse_00024Face__1box exit");
  }
Developer: Nuzhny007, Project: pulse, Lines: 30, Source: pt_chambino_p_pulse_Pulse_Face.cpp

Example 10: transpose

    static void transpose(Type* data, int height, int width) {
#if HAS_IMGLIB
        int elemType;
        if (sizeof(Type) == 1) {
            elemType = CV_8UC1;
        } else if (sizeof(Type) == 4) {
            width /= 4;
            elemType = CV_32F;
        } else {
            throw std::runtime_error("Unsupported type in transpose\n");
        }
        Mat input = Mat(height, width, elemType, data).clone();
        Mat output = Mat(width, height, elemType, data);
        cv::transpose(input, output);
#else
#warning ("OpenCV support not built-in")
        string message = "OpenCV " UNSUPPORTED_MEDIA_MESSAGE;
        throw std::runtime_error(message);
#endif
    }
Developer: AnnaZhou, Project: neon, Lines: 20, Source: matrix.hpp

Example 11: histCalculate

// compute the 256-bin histogram of the source image
Mat KswSegment::histCalculate(const Mat& src)
{
	Mat hist;
	int bins = 256;
	int histSize[] = {bins};
	float range[] = {0, 256};
	const float* ranges[] = {range};
	int channels[] = {0};
	calcHist(&src, 1, channels, Mat(), hist, 1, histSize, ranges, true, false); // histogram of the source image
	return hist;
}
Developer: sxxlearn2rock, Project: SissorActionTest, Lines: 12, Source: KswSegment.cpp
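
A common follow-up to calcHist is to normalize the 256-bin histogram and render it into a Mat for visual inspection. The helper below is a minimal sketch of that step (a hypothetical addition, not part of KswSegment):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Render a 1-D histogram (CV_32F column vector, as returned by calcHist) into an image.
cv::Mat drawHist(const cv::Mat& hist, int histWidth = 512, int histHeight = 400)
{
	const int bins = hist.rows;
	const int binWidth = cvRound(static_cast<double>(histWidth) / bins);

	cv::Mat histImage(histHeight, histWidth, CV_8UC1, cv::Scalar(0));

	// Scale bin counts to the image height.
	cv::Mat scaled;
	cv::normalize(hist, scaled, 0, histHeight, cv::NORM_MINMAX, CV_32F);

	// Connect consecutive bins with line segments.
	for (int i = 1; i < bins; ++i) {
		cv::line(histImage,
		         cv::Point(binWidth * (i - 1), histHeight - cvRound(scaled.at<float>(i - 1))),
		         cv::Point(binWidth * i,       histHeight - cvRound(scaled.at<float>(i))),
		         cv::Scalar(255), 1);
	}
	return histImage;
}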

Example 12: readExtrinsics

bool Calibration::readExtrinsics(const string& file, Mat& R, Mat& t)
{
	CvMat* _R = NULL;
	CvMat* _t = NULL;

	if(file.empty()) {
		return false;
	}

	FileStorage fs(file, FileStorage::READ);
	if(fs.isOpened()) {
		_R = static_cast<CvMat*> (
			fs["Rotation Matrix"].readObj());
		_t = static_cast<CvMat*> 
			(fs["Translation Vector"].readObj());
	}

	R = _R == NULL ? Mat() : Mat(_R);
	t = _t == NULL ? Mat() : Mat(_t);

	return !R.empty() && !t.empty();
}
Developer: JoshMarino, Project: lims-hsv-system, Lines: 22, Source: Calibration.cpp
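
The legacy CvMat round-trip above predates the C++ FileStorage API; cv::Mat can be read from a file node directly with operator>>. Below is a hedged sketch of an equivalent reader using only the C++ interface (same node names assumed; not tested against the project's calibration files):

#include <opencv2/core.hpp>
#include <string>

bool readExtrinsicsCpp(const std::string& file, cv::Mat& R, cv::Mat& t)
{
	if (file.empty())
		return false;

	cv::FileStorage fs(file, cv::FileStorage::READ);
	if (!fs.isOpened())
		return false;

	// Reading a missing node leaves the Mat empty.
	fs["Rotation Matrix"] >> R;
	fs["Translation Vector"] >> t;

	return !R.empty() && !t.empty();
}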

Example 13: getExtrinsics

double Calibration::getExtrinsics(const Mat& Tr)
{
	// set/create various parameters
	Mat rvec;
	Mat w(views.world.front());
	Mat p(views.pixel.front());

	Mat& A = intrinsic_params.A;
	Mat& k = intrinsic_params.k;
	Mat& R = extrinsic_params.R;
	Mat& t = extrinsic_params.t;

	// get extrinsic parameters
	cv::solvePnP(w, p, A, k, rvec, t, solve_pnp.useExtGuess);
	cv::Rodrigues(rvec, R);
	
	// convert default coordinate system to new one
	if(!Tr.empty()) {
		CV_Assert(Tr.rows == 3 && Tr.cols == 4);
		Mat T_Tr = Mat(Tr, Range(0, 3), Range(3, 4));
		Mat R_Tr = Mat(Tr, Range(0, 3), Range(0, 3));

		t += R*T_Tr;
		R *= R_Tr;
	}
	/*std::cout << "you are printing modified R" << std::endl;
	for(int i = 0; i < R.rows; i++) {
		for(int j = 0; j < R.cols; j++) {
			std::cout << R.at<double>(i,j) << " ";
		}
		std::cout << std::endl;
	}*/

	return 0.;
}
Developer: JoshMarino, Project: lims-hsv-system, Lines: 38, Source: Calibration.cpp
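
A note on the coordinate-system conversion at the end of this example: if Tr = [R_Tr | T_Tr] maps points from the new world frame into the calibration frame (X_old = R_Tr * X_new + T_Tr), then a point projects as R * X_old + t = (R * R_Tr) * X_new + (R * T_Tr + t), which is exactly what the two statements compose; note that `t += R*T_Tr;` must run before `R *= R_Tr;` so that it uses the original R. This reading of Tr is an assumption, since the snippet itself does not document it.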

Example 14: readIntrinsics

bool Calibration::readIntrinsics(const string& file, Mat& A, Mat& k)
{
	CvMat* _A = NULL;
	CvMat* _k = NULL;

	if(file.empty()) {
		return false;
	}

	FileStorage fs(file, FileStorage::READ);

	if(fs.isOpened()) {
		_A = static_cast<CvMat*> (
			fs["Camera Matrix"].readObj());
		_k = static_cast<CvMat*> 
			(fs["Distortion Coefficients"].readObj());
	}

	A = _A == NULL ? Mat() : Mat(_A);
	k = _k == NULL ? Mat() : Mat(_k);

	return !A.empty() && !k.empty();
}
Developer: JoshMarino, Project: lims-hsv-system, Lines: 23, Source: Calibration.cpp

Example 15: train

	void train(cv::Mat parameters, cv::Mat initialisations, cv::Mat templates, ProjectionFunction projection, OnTrainingEpochCallback onTrainingEpochCallback)
	{
		using cv::Mat;
		Mat currentX = initialisations;
		for (size_t regressorLevel = 0; regressorLevel < regressors.size(); ++regressorLevel) {
			// 1) Project current parameters x to feature space:
			// Enqueue all tasks in a thread pool:
			auto concurentThreadsSupported = std::thread::hardware_concurrency();
			if (concurentThreadsSupported == 0) {
				concurentThreadsSupported = 4;
			}
			utils::ThreadPool threadPool(concurentThreadsSupported);
			std::vector<std::future<typename std::result_of<ProjectionFunction(Mat, size_t, int)>::type>> results; // will be float or Mat. I might remove float for the sake of code clarity, as it's only useful for very simple examples.
			results.reserve(currentX.rows);
			for (int sampleIndex = 0; sampleIndex < currentX.rows; ++sampleIndex) {
				results.emplace_back(
					threadPool.enqueue(projection, currentX.row(sampleIndex), regressorLevel, sampleIndex)
				);
			}
			// Gather the results from all threads and store the features:
			Mat features;
			for (auto&& result : results) {
				features.push_back(result.get());
			}
			// Set the observed values, depending on if a template y is used:
			Mat observedValues;
			if (templates.empty()) { // unknown template training case
				observedValues = features;
			}
			else { // known template
				observedValues = features - templates;
			}
			Mat b = currentX - parameters; // currentX - x;
			// 2) Learn using that data:
			regressors[regressorLevel].learn(observedValues, b);
			// 3) Apply the learned regressor and use the predictions to learn the next regressor in next loop iteration:
			Mat x_k; // x_k = currentX - R * (h(currentX) - y):
			for (int sampleIndex = 0; sampleIndex < currentX.rows; ++sampleIndex) {
				// No need to re-extract the features, we already did so in step 1)
				x_k.push_back(Mat(currentX.row(sampleIndex) - regressors[regressorLevel].predict(observedValues.row(sampleIndex))));
			}
			currentX = x_k;
			onTrainingEpochCallback(currentX);
		}
	};
Developer: EricChen2013, Project: superviseddescent, Lines: 45, Source: superviseddescent.hpp


Note: The cv::Mat examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs; the snippets are selected from open-source projects contributed by their authors. The source code remains the copyright of the original authors; please consult each project's License before using or redistributing it, and do not repost without permission.